/*
 * Source: drivers/net/ethernet/broadcom/tg3.c
 * (exported from the karo-tx-linux.git tree via gitweb; commit subject:
 *  "net: use DMA_x_DEVICE and dma_mapping_error with skb_frag_dma_map")
 */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
/* Test whether TG3_FLAGS bit @flag is set in the bitmap @bits.
 * Returns nonzero if set.  Taking the enum by value gives the compiler a
 * type check that the tg3_flag() macro wrappers rely on.
 */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}
72
/* Set TG3_FLAGS bit @flag in the bitmap @bits (atomic set_bit). */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
77
/* Clear TG3_FLAGS bit @flag in the bitmap @bits (atomic clear_bit). */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     120
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "August 18, 2011"
96
97 #define RESET_KIND_SHUTDOWN     0
98 #define RESET_KIND_INIT         1
99 #define RESET_KIND_SUSPEND      2
100
101 #define TG3_DEF_RX_MODE         0
102 #define TG3_DEF_TX_MODE         0
103 #define TG3_DEF_MSG_ENABLE        \
104         (NETIF_MSG_DRV          | \
105          NETIF_MSG_PROBE        | \
106          NETIF_MSG_LINK         | \
107          NETIF_MSG_TIMER        | \
108          NETIF_MSG_IFDOWN       | \
109          NETIF_MSG_IFUP         | \
110          NETIF_MSG_RX_ERR       | \
111          NETIF_MSG_TX_ERR)
112
113 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
114
115 /* length of time before we decide the hardware is borked,
116  * and dev->tx_timeout() should be called to fix the problem
117  */
118
119 #define TG3_TX_TIMEOUT                  (5 * HZ)
120
121 /* hardware minimum and maximum for a single frame's data payload */
122 #define TG3_MIN_MTU                     60
123 #define TG3_MAX_MTU(tp) \
124         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
125
126 /* These numbers seem to be hard coded in the NIC firmware somehow.
127  * You can't change the ring sizes, but you can change where you place
128  * them in the NIC onboard memory.
129  */
130 #define TG3_RX_STD_RING_SIZE(tp) \
131         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
133 #define TG3_DEF_RX_RING_PENDING         200
134 #define TG3_RX_JMB_RING_SIZE(tp) \
135         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
137 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
138 #define TG3_RSS_INDIR_TBL_SIZE          128
139
140 /* Do not place this n-ring entries value into the tp struct itself,
141  * we really want to expose these constants to GCC so that modulo et
142  * al.  operations are done with shifts and masks instead of with
143  * hw multiply/modulo instructions.  Another solution would be to
144  * replace things like '% foo' with '& (foo - 1)'.
145  */
146
147 #define TG3_TX_RING_SIZE                512
148 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
149
150 #define TG3_RX_STD_RING_BYTES(tp) \
151         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
152 #define TG3_RX_JMB_RING_BYTES(tp) \
153         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
154 #define TG3_RX_RCB_RING_BYTES(tp) \
155         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
156 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
157                                  TG3_TX_RING_SIZE)
158 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
159
160 #define TG3_DMA_BYTE_ENAB               64
161
162 #define TG3_RX_STD_DMA_SZ               1536
163 #define TG3_RX_JMB_DMA_SZ               9046
164
165 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
166
167 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
168 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
169
170 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
171         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
172
173 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
174         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
175
176 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
177  * that are at least dword aligned when used in PCIX mode.  The driver
178  * works around this bug by double copying the packet.  This workaround
179  * is built into the normal double copy length check for efficiency.
180  *
181  * However, the double copy is only necessary on those architectures
182  * where unaligned memory accesses are inefficient.  For those architectures
183  * where unaligned memory accesses incur little penalty, we can reintegrate
184  * the 5701 in the normal rx path.  Doing so saves a device structure
185  * dereference by hardcoding the double copy threshold in place.
186  */
187 #define TG3_RX_COPY_THRESHOLD           256
188 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
189         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
190 #else
191         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
192 #endif
193
194 #if (NET_IP_ALIGN != 0)
195 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
196 #else
197 #define TG3_RX_OFFSET(tp)       0
198 #endif
199
200 /* minimum number of free TX descriptors required to wake up TX process */
201 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
202 #define TG3_TX_BD_DMA_MAX               4096
203
204 #define TG3_RAW_IP_ALIGN 2
205
206 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
207
208 #define FIRMWARE_TG3            "tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
211
/* Version banner printed once at module load / probe time. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
214
215 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_MODULE_VERSION);
219 MODULE_FIRMWARE(FIRMWARE_TG3);
220 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
222
/* Bitmap of NETIF_MSG_* message categories to enable.  The default of -1
 * means "use TG3_DEF_MSG_ENABLE"; resolved wherever the value is consumed.
 */
static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
226
/* PCI IDs this driver binds to.  Broadcom parts first, then Tigon3-based
 * boards sold under other vendor IDs (SysKonnect, Altima, Apple, Fujitsu).
 * The empty initializer terminates the table.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};
311
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
313
/* ethtool statistics names, one per counter exported by ETHTOOL_GSTATS.
 * NOTE: the index order here is ABI — it must stay in lockstep with the
 * order in which the driver fills the statistics array, so do not reorder
 * or insert entries in the middle.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	/* Receive-side MAC counters */
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	/* Transmit-side MAC counters */
	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	/* DMA write engine counters */
	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	/* DMA read engine counters */
	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	/* Host/NIC interaction counters */
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
396
397 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
398
399
/* ethtool self-test names reported via ETHTOOL_GSTRINGS.  Order is ABI and
 * must match the order in which the self-test routine fills its result
 * array — do not reorder.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test        (online) " },
	{ "link test         (online) " },
	{ "register test     (offline)" },
	{ "memory test       (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test    (offline)" },
};
412
413 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
414
415
/* Write @val to chip register @off via direct (posted) MMIO. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
420
/* Read chip register @off via direct MMIO. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
425
/* Write @val to APE register @off (separate BAR from the main registers). */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
430
/* Read APE register @off. */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
435
436 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
437 {
438         unsigned long flags;
439
440         spin_lock_irqsave(&tp->indirect_lock, flags);
441         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
442         pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
443         spin_unlock_irqrestore(&tp->indirect_lock, flags);
444 }
445
/* Write @val to register @off, then read it back to flush the posted
 * write so the register is guaranteed to have been updated on return.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);	/* read-back flushes the posted write */
}
451
452 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
453 {
454         unsigned long flags;
455         u32 val;
456
457         spin_lock_irqsave(&tp->indirect_lock, flags);
458         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
459         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
460         spin_unlock_irqrestore(&tp->indirect_lock, flags);
461         return val;
462 }
463
/* Write @val to mailbox register @off while the chip is in indirect
 * access mode.  Two mailboxes have dedicated PCI config space aliases
 * and are handled without taking the indirect window; everything else
 * goes through the shared base-address/data window (mailboxes live at
 * a 0x5600 offset in that address space).
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* RX return ring consumer index has a direct config-space alias. */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	/* Standard RX producer index also has a config-space alias. */
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
493
494 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
495 {
496         unsigned long flags;
497         u32 val;
498
499         spin_lock_irqsave(&tp->indirect_lock, flags);
500         pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
501         pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
502         spin_unlock_irqrestore(&tp->indirect_lock, flags);
503         return val;
504 }
505
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507  * where it is unsafe to read back the register without some delay.
508  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
510  */
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods: the configured write32 hook does not
		 * post, so no read-back flush is needed.
		 */
		tp->write32(tp, off, val);
	else {
		/* Posted method: wait, then read back to flush the write. */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
529
530 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
531 {
532         tp->write32_mbox(tp, off, val);
533         if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
534                 tp->read32_mbox(tp, off);
535 }
536
/* Write @val to a TX mailbox.  Chips with the TXD mailbox hardware bug
 * need the value written twice; chips that may reorder mailbox writes
 * need a read-back to flush.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);	/* HW bug workaround: double write */
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);		/* flush to enforce write ordering */
}
546
/* 5906: mailboxes are accessed through the GRC mailbox window. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}
551
/* 5906: mailbox writes go through the GRC mailbox window. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
556
557 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
562
563 #define tw32(reg, val)                  tp->write32(tp, reg, val)
564 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg)                       tp->read32(tp, reg)
567
/* Write @val to NIC on-chip SRAM at offset @off through the memory
 * window, serialized under indirect_lock.  Depending on chip flags the
 * window is driven either via PCI config space or via MMIO registers.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* 5906: writes in the stats-block SRAM range are silently dropped
	 * (presumably that range is not usable on this chip — matches the
	 * corresponding check in tg3_read_mem()).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
592
/* Read NIC on-chip SRAM at offset @off into *@val through the memory
 * window, serialized under indirect_lock.  Mirrors tg3_write_mem():
 * the window is driven via PCI config space or MMIO depending on flags.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	/* 5906: the stats-block SRAM range is not accessible; report 0. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
619
620 static void tg3_ape_lock_init(struct tg3 *tp)
621 {
622         int i;
623         u32 regbase, bit;
624
625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626                 regbase = TG3_APE_LOCK_GRANT;
627         else
628                 regbase = TG3_APE_PER_LOCK_GRANT;
629
630         /* Make sure the driver hasn't any stale locks. */
631         for (i = 0; i < 8; i++) {
632                 if (i == TG3_APE_LOCK_GPIO)
633                         continue;
634                 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
635         }
636
637         /* Clear the correct bit of the GPIO lock too. */
638         if (!tp->pci_fn)
639                 bit = APE_LOCK_GRANT_DRIVER;
640         else
641                 bit = 1 << tp->pci_fn;
642
643         tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
644 }
645
/* Acquire APE hardware semaphore @locknum, arbitrating access to shared
 * resources between the host driver and the APE firmware.
 *
 * Returns 0 on success (or trivially when APE support is absent, or for
 * the GPIO lock on 5761 which has none), -EINVAL for an unsupported lock
 * number, or -EBUSY if the lock was not granted within ~1 ms.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock; nothing to acquire. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return 0;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy request/grant register block. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	/* The GPIO lock is requested per PCI function; everything else
	 * uses the common driver request bit.
	 */
	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_REQ_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}
699
/* Release APE hardware semaphore @locknum (counterpart of tg3_ape_lock).
 * Writing the owning bit to the grant register releases the lock.
 * Silently ignores unsupported lock numbers and chips without APE.
 */
static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		/* 5761 has no GPIO lock; nothing to release. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			return;
		/* fall through */
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	/* 5761 uses the legacy grant register block. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	/* GPIO lock is held per PCI function; others use the driver bit. */
	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
		bit = APE_LOCK_GRANT_DRIVER;
	else
		bit = 1 << tp->pci_fn;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
730
/* Post @event to the APE firmware through the shared event-status
 * register.  Bails out early if the firmware is absent, not ready, or
 * configured for NCSI (which has no APE event interface).  Best-effort:
 * gives the APE up to ~1 ms to service a previously pending event, and
 * silently gives up otherwise.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	/* Verify the APE firmware segment signature is present. */
	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Only write the new event once no event is pending. */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the APE doorbell only if we actually queued the event. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
770
/* Inform the APE firmware of a driver state transition.
 * @kind is one of RESET_KIND_INIT / RESET_KIND_SHUTDOWN /
 * RESET_KIND_SUSPEND; anything else is ignored.  Updates the host
 * segment registers as needed and then posts a state-change event.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment signature, driver identity and
		 * behavior flags, and bump the init counter.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* If WoL is armed, tell the APE to keep the link alive. */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
827
/* Mask all chip interrupts: set the PCI INT mask bit in MISC_HOST_CTRL,
 * then write 1 to every vector's interrupt mailbox (covers all possible
 * vectors via irq_max, not just the currently active irq_cnt).
 */
828 static void tg3_disable_ints(struct tg3 *tp)
829 {
830         int i;
831
832         tw32(TG3PCI_MISC_HOST_CTRL,
833              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
834         for (i = 0; i < tp->irq_max; i++)
835                 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
836 }
837
/* Unmask chip interrupts and re-arm every active NAPI vector's mailbox
 * with its last processed tag.  Finishes by either forcing an interrupt
 * (status already updated in non-tagged mode) or kicking the coalescing
 * engine so pending work is noticed.
 */
838 static void tg3_enable_ints(struct tg3 *tp)
839 {
840         int i;
841
842         tp->irq_sync = 0;
843         wmb();
844
845         tw32(TG3PCI_MISC_HOST_CTRL,
846              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
847
848         tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
849         for (i = 0; i < tp->irq_cnt; i++) {
850                 struct tg3_napi *tnapi = &tp->napi[i];
851
852                 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                /* NOTE(review): mailbox is written twice for 1SHOT_MSI —
                 * presumably needed to re-arm one-shot MSI; confirm against
                 * chip errata before changing. */
853                 if (tg3_flag(tp, 1SHOT_MSI))
854                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
855
856                 tp->coal_now |= tnapi->coal_now;
857         }
858
859         /* Force an initial interrupt */
860         if (!tg3_flag(tp, TAGGED_STATUS) &&
861             (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
862                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
863         else
864                 tw32(HOSTCC_MODE, tp->coal_now);
865
866         tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
867 }
868
869 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
870 {
871         struct tg3 *tp = tnapi->tp;
872         struct tg3_hw_status *sblk = tnapi->hw_status;
873         unsigned int work_exists = 0;
874
875         /* check for phy events */
876         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
877                 if (sblk->status & SD_STATUS_LINK_CHG)
878                         work_exists = 1;
879         }
880         /* check for RX/TX work to do */
881         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
882             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
883                 work_exists = 1;
884
885         return work_exists;
886 }
887
888 /* tg3_int_reenable
889  *  similar to tg3_enable_ints, but it accurately determines whether there
890  *  is new work pending and can return without flushing the PIO write
891  *  which reenables interrupts
892  */
893 static void tg3_int_reenable(struct tg3_napi *tnapi)
894 {
895         struct tg3 *tp = tnapi->tp;
896
        /* Re-arm this vector; mmiowb orders the mailbox write before any
         * subsequent MMIO from another CPU releasing a lock. */
897         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
898         mmiowb();
899
900         /* When doing tagged status, this work check is unnecessary.
901          * The last_tag we write above tells the chip which piece of
902          * work we've completed.
903          */
904         if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
905                 tw32(HOSTCC_MODE, tp->coalesce_mode |
906                      HOSTCC_MODE_ENABLE | tnapi->coal_now);
907 }
908
/* Restore TG3PCI_CLOCK_CTRL to the default core clock, preserving only
 * the CLKRUN bits, stepping through the required intermediate settings
 * (625 MHz core on 5705+, ALTCLK sequence on older chips).  No-op on
 * CPMU-present and 5780-class devices.
 */
909 static void tg3_switch_clocks(struct tg3 *tp)
910 {
911         u32 clock_ctrl;
912         u32 orig_clock_ctrl;
913
914         if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
915                 return;
916
917         clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
918
919         orig_clock_ctrl = clock_ctrl;
920         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
921                        CLOCK_CTRL_CLKRUN_OENABLE |
922                        0x1f);
923         tp->pci_clock_ctrl = clock_ctrl;
924
925         if (tg3_flag(tp, 5705_PLUS)) {
926                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
927                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
928                                     clock_ctrl | CLOCK_CTRL_625_CORE, 40);
929                 }
930         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                /* Two-step transition: 44MHz+ALTCLK first, then ALTCLK only. */
931                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
932                             clock_ctrl |
933                             (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
934                             40);
935                 tw32_wait_f(TG3PCI_CLOCK_CTRL,
936                             clock_ctrl | (CLOCK_CTRL_ALTCLK),
937                             40);
938         }
939         tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
940 }
941
942 #define PHY_BUSY_LOOPS  5000
943
/* Read PHY register @reg over the MAC's MI (MDIO) interface into *@val.
 * Auto-polling is temporarily disabled around the transaction and
 * restored afterwards.  Returns 0 on success, -EBUSY if the MI engine
 * stays busy for the full poll budget (*val is left 0 in that case).
 */
944 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
945 {
946         u32 frame_val;
947         unsigned int loops;
948         int ret;
949
950         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
951                 tw32_f(MAC_MI_MODE,
952                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
953                 udelay(80);
954         }
955
956         *val = 0x0;
957
        /* Build the MI command frame: PHY address, register, read op. */
958         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
959                       MI_COM_PHY_ADDR_MASK);
960         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
961                       MI_COM_REG_ADDR_MASK);
962         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
963
964         tw32_f(MAC_MI_COM, frame_val);
965
966         loops = PHY_BUSY_LOOPS;
967         while (loops != 0) {
968                 udelay(10);
969                 frame_val = tr32(MAC_MI_COM);
970
971                 if ((frame_val & MI_COM_BUSY) == 0) {
                        /* Re-read after a settle delay to latch the data. */
972                         udelay(5);
973                         frame_val = tr32(MAC_MI_COM);
974                         break;
975                 }
976                 loops -= 1;
977         }
978
979         ret = -EBUSY;
980         if (loops != 0) {
981                 *val = frame_val & MI_COM_DATA_MASK;
982                 ret = 0;
983         }
984
985         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
986                 tw32_f(MAC_MI_MODE, tp->mi_mode);
987                 udelay(80);
988         }
989
990         return ret;
991 }
992
/* Write @val to PHY register @reg over the MI (MDIO) interface.
 * On FET PHYs, writes to MII_CTRL1000 and MII_TG3_AUX_CTRL are silently
 * ignored (those registers do not apply there).  Returns 0 on success,
 * -EBUSY if the MI engine stays busy for the full poll budget.
 */
993 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
994 {
995         u32 frame_val;
996         unsigned int loops;
997         int ret;
998
999         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1000             (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1001                 return 0;
1002
        /* Disable auto-polling while we own the MI interface. */
1003         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1004                 tw32_f(MAC_MI_MODE,
1005                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1006                 udelay(80);
1007         }
1008
1009         frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1010                       MI_COM_PHY_ADDR_MASK);
1011         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1012                       MI_COM_REG_ADDR_MASK);
1013         frame_val |= (val & MI_COM_DATA_MASK);
1014         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1015
1016         tw32_f(MAC_MI_COM, frame_val);
1017
1018         loops = PHY_BUSY_LOOPS;
1019         while (loops != 0) {
1020                 udelay(10);
1021                 frame_val = tr32(MAC_MI_COM);
1022                 if ((frame_val & MI_COM_BUSY) == 0) {
1023                         udelay(5);
1024                         frame_val = tr32(MAC_MI_COM);
1025                         break;
1026                 }
1027                 loops -= 1;
1028         }
1029
1030         ret = -EBUSY;
1031         if (loops != 0)
1032                 ret = 0;
1033
        /* Restore auto-polling if it was enabled on entry. */
1034         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1035                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1036                 udelay(80);
1037         }
1038
1039         return ret;
1040 }
1041
/* Indirect clause-45 style write: select device @devad and register
 * @addr through the MMD control/address pair, then transfer @val through
 * the address register while the control register is in no-increment
 * data mode.  Returns 0 or the first tg3_writephy() error.
 */
1042 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1043 {
1044         int err;
1045
1046         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1047         if (err)
1048                 goto done;
1049
1050         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1051         if (err)
1052                 goto done;
1053
1054         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1055                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1056         if (err)
1057                 goto done;
1058
        /* In data mode the ADDRESS register carries the payload. */
1059         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1060
1061 done:
1062         return err;
1063 }
1064
/* Indirect clause-45 style read: mirror of tg3_phy_cl45_write(); after
 * selecting @devad/@addr and switching to no-increment data mode, the
 * data word is read back through the MMD address register into *@val.
 * Returns 0 or the first PHY access error.
 */
1065 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1066 {
1067         int err;
1068
1069         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1070         if (err)
1071                 goto done;
1072
1073         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1074         if (err)
1075                 goto done;
1076
1077         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1078                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1079         if (err)
1080                 goto done;
1081
1082         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1083
1084 done:
1085         return err;
1086 }
1087
1088 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1089 {
1090         int err;
1091
1092         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1093         if (!err)
1094                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1095
1096         return err;
1097 }
1098
1099 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1100 {
1101         int err;
1102
1103         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1104         if (!err)
1105                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1106
1107         return err;
1108 }
1109
1110 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1111 {
1112         int err;
1113
1114         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1115                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1116                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1117         if (!err)
1118                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1119
1120         return err;
1121 }
1122
1123 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1124 {
1125         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1126                 set |= MII_TG3_AUXCTL_MISC_WREN;
1127
1128         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1129 }
1130
/* Enable/disable the PHY SMDSP block through the AUXCTL shadow register.
 * Both macros expand to a tg3_phy_auxctl_write() call expression.  The
 * DISABLE variant previously carried a trailing semicolon inside the
 * macro body, which made call sites expand to a double statement and
 * broke unbraced if/else usage; the semicolon is removed so callers
 * supply their own, matching the ENABLE variant.
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1139
/* Issue a PHY soft reset via BMCR and poll (up to 5000 * 10us) until
 * the self-clearing BMCR_RESET bit drops.  Returns 0 on success,
 * -EBUSY on any PHY access failure or if the bit never clears.
 */
1140 static int tg3_bmcr_reset(struct tg3 *tp)
1141 {
1142         u32 phy_control;
1143         int limit, err;
1144
1145         /* OK, reset it, and poll the BMCR_RESET bit until it
1146          * clears or we time out.
1147          */
1148         phy_control = BMCR_RESET;
1149         err = tg3_writephy(tp, MII_BMCR, phy_control);
1150         if (err != 0)
1151                 return -EBUSY;
1152
1153         limit = 5000;
1154         while (limit--) {
1155                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1156                 if (err != 0)
1157                         return -EBUSY;
1158
1159                 if ((phy_control & BMCR_RESET) == 0) {
1160                         udelay(40);
1161                         break;
1162                 }
1163                 udelay(10);
1164         }
        /* limit is -1 only if the loop exhausted without a break. */
1165         if (limit < 0)
1166                 return -EBUSY;
1167
1168         return 0;
1169 }
1170
/* phylib mii_bus->read callback: read PHY register @reg under tp->lock.
 * Returns the 16-bit register value, or -EIO on access failure.
 */
1171 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1172 {
1173         struct tg3 *tp = bp->priv;
1174         u32 val;
1175
1176         spin_lock_bh(&tp->lock);
1177
        /* -EIO is stored in the u32 and converts back to the negative
         * errno when returned through the int return type. */
1178         if (tg3_readphy(tp, reg, &val))
1179                 val = -EIO;
1180
1181         spin_unlock_bh(&tp->lock);
1182
1183         return val;
1184 }
1185
1186 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1187 {
1188         struct tg3 *tp = bp->priv;
1189         u32 ret = 0;
1190
1191         spin_lock_bh(&tp->lock);
1192
1193         if (tg3_writephy(tp, reg, val))
1194                 ret = -EIO;
1195
1196         spin_unlock_bh(&tp->lock);
1197
1198         return ret;
1199 }
1200
/* phylib mii_bus->reset callback: nothing to do here — the PHY is
 * reset elsewhere (see tg3_bmcr_reset() during init).  Always succeeds.
 */
1201 static int tg3_mdio_reset(struct mii_bus *bp)
1202 {
1203         return 0;
1204 }
1205
/* Program the 5785 MAC's PHY configuration (LED modes, RGMII in-band
 * signalling and timeouts) according to which external PHY is attached.
 * Unknown PHYs are left untouched; non-RGMII interfaces take a short
 * path that only sets LED modes and clock timeouts.
 */
1206 static void tg3_mdio_config_5785(struct tg3 *tp)
1207 {
1208         u32 val;
1209         struct phy_device *phydev;
1210
1211         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1212         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1213         case PHY_ID_BCM50610:
1214         case PHY_ID_BCM50610M:
1215                 val = MAC_PHYCFG2_50610_LED_MODES;
1216                 break;
1217         case PHY_ID_BCMAC131:
1218                 val = MAC_PHYCFG2_AC131_LED_MODES;
1219                 break;
1220         case PHY_ID_RTL8211C:
1221                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1222                 break;
1223         case PHY_ID_RTL8201E:
1224                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1225                 break;
1226         default:
1227                 return;
1228         }
1229
        /* Non-RGMII: just LED modes plus clock timeouts, then done. */
1230         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1231                 tw32(MAC_PHYCFG2, val);
1232
1233                 val = tr32(MAC_PHYCFG1);
1234                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1235                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1236                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1237                 tw32(MAC_PHYCFG1, val);
1238
1239                 return;
1240         }
1241
        /* RGMII with in-band status enabled needs the full mask set. */
1242         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1243                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1244                        MAC_PHYCFG2_FMODE_MASK_MASK |
1245                        MAC_PHYCFG2_GMODE_MASK_MASK |
1246                        MAC_PHYCFG2_ACT_MASK_MASK   |
1247                        MAC_PHYCFG2_QUAL_MASK_MASK |
1248                        MAC_PHYCFG2_INBAND_ENABLE;
1249
1250         tw32(MAC_PHYCFG2, val);
1251
1252         val = tr32(MAC_PHYCFG1);
1253         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1254                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1255         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1256                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1257                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1258                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1259                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1260         }
1261         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1262                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1263         tw32(MAC_PHYCFG1, val);
1264
1265         val = tr32(MAC_EXT_RGMII_MODE);
1266         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1267                  MAC_RGMII_MODE_RX_QUALITY |
1268                  MAC_RGMII_MODE_RX_ACTIVITY |
1269                  MAC_RGMII_MODE_RX_ENG_DET |
1270                  MAC_RGMII_MODE_TX_ENABLE |
1271                  MAC_RGMII_MODE_TX_LOWPWR |
1272                  MAC_RGMII_MODE_TX_RESET);
1273         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1274                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1275                         val |= MAC_RGMII_MODE_RX_INT_B |
1276                                MAC_RGMII_MODE_RX_QUALITY |
1277                                MAC_RGMII_MODE_RX_ACTIVITY |
1278                                MAC_RGMII_MODE_RX_ENG_DET;
1279                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1280                         val |= MAC_RGMII_MODE_TX_ENABLE |
1281                                MAC_RGMII_MODE_TX_LOWPWR |
1282                                MAC_RGMII_MODE_TX_RESET;
1283         }
1284         tw32(MAC_EXT_RGMII_MODE, val);
1285 }
1286
/* Take direct control of the MI interface (disable auto-polling) and,
 * if the mdio bus is already registered on a 5785, re-apply the
 * PHY-specific MAC configuration.
 */
1287 static void tg3_mdio_start(struct tg3 *tp)
1288 {
1289         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1290         tw32_f(MAC_MI_MODE, tp->mi_mode);
1291         udelay(80);
1292
1293         if (tg3_flag(tp, MDIOBUS_INITED) &&
1294             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1295                 tg3_mdio_config_5785(tp);
1296 }
1297
/* Determine the PHY address (per-PCI-function on 5717+, with a +7
 * offset for serdes), start the MI interface, and — when phylib is in
 * use — allocate, populate and register the mdio bus, then apply
 * PHY-specific dev_flags.  Returns 0 on success or a negative errno;
 * all partially-acquired resources are released on failure.
 */
1298 static int tg3_mdio_init(struct tg3 *tp)
1299 {
1300         int i;
1301         u32 reg;
1302         struct phy_device *phydev;
1303
1304         if (tg3_flag(tp, 5717_PLUS)) {
1305                 u32 is_serdes;
1306
1307                 tp->phy_addr = tp->pci_fn + 1;
1308
1309                 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1310                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1311                 else
1312                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1313                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1314                 if (is_serdes)
1315                         tp->phy_addr += 7;
1316         } else
1317                 tp->phy_addr = TG3_PHY_MII_ADDR;
1318
1319         tg3_mdio_start(tp);
1320
1321         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1322                 return 0;
1323
1324         tp->mdio_bus = mdiobus_alloc();
1325         if (tp->mdio_bus == NULL)
1326                 return -ENOMEM;
1327
1328         tp->mdio_bus->name     = "tg3 mdio bus";
1329         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1330                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1331         tp->mdio_bus->priv     = tp;
1332         tp->mdio_bus->parent   = &tp->pdev->dev;
1333         tp->mdio_bus->read     = &tg3_mdio_read;
1334         tp->mdio_bus->write    = &tg3_mdio_write;
1335         tp->mdio_bus->reset    = &tg3_mdio_reset;
1336         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1337         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1338
1339         for (i = 0; i < PHY_MAX_ADDR; i++)
1340                 tp->mdio_bus->irq[i] = PHY_POLL;
1341
1342         /* The bus registration will look for all the PHYs on the mdio bus.
1343          * Unfortunately, it does not ensure the PHY is powered up before
1344          * accessing the PHY ID registers.  A chip reset is the
1345          * quickest way to bring the device back to an operational state..
1346          */
1347         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1348                 tg3_bmcr_reset(tp);
1349
1350         i = mdiobus_register(tp->mdio_bus);
1351         if (i) {
1352                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1353                 mdiobus_free(tp->mdio_bus);
1354                 return i;
1355         }
1356
1357         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1358
1359         if (!phydev || !phydev->drv) {
1360                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1361                 mdiobus_unregister(tp->mdio_bus);
1362                 mdiobus_free(tp->mdio_bus);
1363                 return -ENODEV;
1364         }
1365
        /* Configure interface mode and workaround flags per PHY model. */
1366         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1367         case PHY_ID_BCM57780:
1368                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1369                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1370                 break;
1371         case PHY_ID_BCM50610:
1372         case PHY_ID_BCM50610M:
1373                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1374                                      PHY_BRCM_RX_REFCLK_UNUSED |
1375                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1376                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1377                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1378                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1379                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1380                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1381                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1382                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1383                 /* fallthru */
1384         case PHY_ID_RTL8211C:
1385                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1386                 break;
1387         case PHY_ID_RTL8201E:
1388         case PHY_ID_BCMAC131:
1389                 phydev->interface = PHY_INTERFACE_MODE_MII;
1390                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1391                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1392                 break;
1393         }
1394
1395         tg3_flag_set(tp, MDIOBUS_INITED);
1396
1397         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1398                 tg3_mdio_config_5785(tp);
1399
1400         return 0;
1401 }
1402
1403 static void tg3_mdio_fini(struct tg3 *tp)
1404 {
1405         if (tg3_flag(tp, MDIOBUS_INITED)) {
1406                 tg3_flag_clear(tp, MDIOBUS_INITED);
1407                 mdiobus_unregister(tp->mdio_bus);
1408                 mdiobus_free(tp->mdio_bus);
1409         }
1410 }
1411
1412 /* tp->lock is held. */
/* Ring the doorbell for a firmware event: set the driver-event bit in
 * GRC_RX_CPU_EVENT and record the time so tg3_wait_for_event_ack() can
 * bound its wait for the previous event's acknowledgement.
 */
1413 static inline void tg3_generate_fw_event(struct tg3 *tp)
1414 {
1415         u32 val;
1416
1417         val = tr32(GRC_RX_CPU_EVENT);
1418         val |= GRC_RX_CPU_DRIVER_EVENT;
1419         tw32_f(GRC_RX_CPU_EVENT, val);
1420
1421         tp->last_event_jiffies = jiffies;
1422 }
1423
1424 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1425
1426 /* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC) for the firmware to clear
 * the previous driver-event bit.  The remaining wall-clock time since
 * the last event is used to shorten or skip the wait entirely.
 */
1427 static void tg3_wait_for_event_ack(struct tg3 *tp)
1428 {
1429         int i;
1430         unsigned int delay_cnt;
1431         long time_remain;
1432
1433         /* If enough time has passed, no wait is necessary. */
1434         time_remain = (long)(tp->last_event_jiffies + 1 +
1435                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1436                       (long)jiffies;
1437         if (time_remain < 0)
1438                 return;
1439
1440         /* Check if we can shorten the wait time. */
1441         delay_cnt = jiffies_to_usecs(time_remain);
1442         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1443                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        /* Poll in 8us steps; +1 guards against a zero iteration count. */
1444         delay_cnt = (delay_cnt >> 3) + 1;
1445
1446         for (i = 0; i < delay_cnt; i++) {
1447                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1448                         break;
1449                 udelay(8);
1450         }
1451 }
1452
1453 /* tp->lock is held. */
/* Report current link parameters (BMCR/BMSR, advertisement/LPA,
 * 1000BASE-T control/status, PHY address) to the ASF management
 * firmware through the NIC SRAM command mailbox.  Only applies to
 * 5780-class chips with ASF enabled.
 */
1454 static void tg3_ump_link_report(struct tg3 *tp)
1455 {
1456         u32 reg;
1457         u32 val;
1458
1459         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1460                 return;
1461
1462         tg3_wait_for_event_ack(tp);
1463
1464         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1465
        /* NOTE(review): length is 14 but four 4-byte data words follow
         * (offsets +0..+12); presumably the firmware only consumes 14
         * bytes — confirm against the ASF firmware interface spec. */
1466         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1467
1468         val = 0;
1469         if (!tg3_readphy(tp, MII_BMCR, &reg))
1470                 val = reg << 16;
1471         if (!tg3_readphy(tp, MII_BMSR, &reg))
1472                 val |= (reg & 0xffff);
1473         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1474
1475         val = 0;
1476         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1477                 val = reg << 16;
1478         if (!tg3_readphy(tp, MII_LPA, &reg))
1479                 val |= (reg & 0xffff);
1480         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1481
1482         val = 0;
1483         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1484                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1485                         val = reg << 16;
1486                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1487                         val |= (reg & 0xffff);
1488         }
1489         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1490
1491         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1492                 val = reg << 16;
1493         else
1494                 val = 0;
1495         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1496
1497         tg3_generate_fw_event(tp);
1498 }
1499
1500 /* tp->lock is held. */
/* Ask the ASF firmware to pause: send FWCMD_NICDRV_PAUSE_FW through
 * the SRAM mailbox, waiting for acknowledgement both before and after.
 * Skipped when the APE handles management instead of ASF.
 */
1501 static void tg3_stop_fw(struct tg3 *tp)
1502 {
1503         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1504                 /* Wait for RX cpu to ACK the previous event. */
1505                 tg3_wait_for_event_ack(tp);
1506
1507                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1508
1509                 tg3_generate_fw_event(tp);
1510
1511                 /* Wait for RX cpu to ACK this event. */
1512                 tg3_wait_for_event_ack(tp);
1513         }
1514 }
1515
1516 /* tp->lock is held. */
/* Before a chip reset: write the firmware magic signature, record the
 * driver state in SRAM when the new ASF handshake is in use, and notify
 * the APE for init/suspend transitions (shutdown is signalled
 * post-reset instead — see tg3_write_sig_post_reset()).
 */
1517 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1518 {
1519         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1520                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1521
1522         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1523                 switch (kind) {
1524                 case RESET_KIND_INIT:
1525                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1526                                       DRV_STATE_START);
1527                         break;
1528
1529                 case RESET_KIND_SHUTDOWN:
1530                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1531                                       DRV_STATE_UNLOAD);
1532                         break;
1533
1534                 case RESET_KIND_SUSPEND:
1535                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1536                                       DRV_STATE_SUSPEND);
1537                         break;
1538
1539                 default:
1540                         break;
1541                 }
1542         }
1543
1544         if (kind == RESET_KIND_INIT ||
1545             kind == RESET_KIND_SUSPEND)
1546                 tg3_ape_driver_state_change(tp, kind);
1547 }
1548
1549 /* tp->lock is held. */
/* After a chip reset: record the "done" driver state in SRAM when the
 * new ASF handshake is in use, and notify the APE of shutdown (init and
 * suspend were already signalled pre-reset).
 */
1550 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1551 {
1552         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1553                 switch (kind) {
1554                 case RESET_KIND_INIT:
1555                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1556                                       DRV_STATE_START_DONE);
1557                         break;
1558
1559                 case RESET_KIND_SHUTDOWN:
1560                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1561                                       DRV_STATE_UNLOAD_DONE);
1562                         break;
1563
1564                 default:
1565                         break;
1566                 }
1567         }
1568
1569         if (kind == RESET_KIND_SHUTDOWN)
1570                 tg3_ape_driver_state_change(tp, kind);
1571 }
1572
1573 /* tp->lock is held. */
/* Legacy (pre-new-handshake) ASF signalling: simply record the driver
 * state in the SRAM state mailbox; no APE involvement.
 */
1574 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1575 {
1576         if (tg3_flag(tp, ENABLE_ASF)) {
1577                 switch (kind) {
1578                 case RESET_KIND_INIT:
1579                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1580                                       DRV_STATE_START);
1581                         break;
1582
1583                 case RESET_KIND_SHUTDOWN:
1584                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1585                                       DRV_STATE_UNLOAD);
1586                         break;
1587
1588                 case RESET_KIND_SUSPEND:
1589                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1590                                       DRV_STATE_SUSPEND);
1591                         break;
1592
1593                 default:
1594                         break;
1595                 }
1596         }
1597 }
1598
/* Wait for on-chip firmware to finish initializing after a reset.
 * 5906: poll VCPU status for up to 20ms (hard failure if not done).
 * Others: poll the firmware mailbox for the inverted magic for up to
 * 1s, but a timeout is NOT an error — some boards (e.g. Sun onboard)
 * legitimately run without firmware; it is reported once instead.
 */
1599 static int tg3_poll_fw(struct tg3 *tp)
1600 {
1601         int i;
1602         u32 val;
1603
1604         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1605                 /* Wait up to 20ms for init done. */
1606                 for (i = 0; i < 200; i++) {
1607                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1608                                 return 0;
1609                         udelay(100);
1610                 }
1611                 return -ENODEV;
1612         }
1613
1614         /* Wait for firmware initialization to complete. */
1615         for (i = 0; i < 100000; i++) {
1616                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1617                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1618                         break;
1619                 udelay(10);
1620         }
1621
1622         /* Chip might not be fitted with firmware.  Some Sun onboard
1623          * parts are configured like that.  So don't signal the timeout
1624          * of the above loop as an error, but do report the lack of
1625          * running firmware once.
1626          */
1627         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1628                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1629
1630                 netdev_info(tp->dev, "No firmware running\n");
1631         }
1632
1633         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1634                 /* The 57765 A0 needs a little more
1635                  * time to do some important work.
1636                  */
1637                 mdelay(10);
1638         }
1639
1640         return 0;
1641 }
1642
/* Log the current link state (speed/duplex/flow-control/EEE) to the
 * kernel log, gated on netif_msg_link(), and mirror the update to the
 * management firmware via tg3_ump_link_report().
 */
1643 static void tg3_link_report(struct tg3 *tp)
1644 {
1645         if (!netif_carrier_ok(tp->dev)) {
1646                 netif_info(tp, link, tp->dev, "Link is down\n");
1647                 tg3_ump_link_report(tp);
1648         } else if (netif_msg_link(tp)) {
1649                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1650                             (tp->link_config.active_speed == SPEED_1000 ?
1651                              1000 :
1652                              (tp->link_config.active_speed == SPEED_100 ?
1653                               100 : 10)),
1654                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1655                              "full" : "half"));
1656
1657                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1658                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1659                             "on" : "off",
1660                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1661                             "on" : "off");
1662
1663                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1664                         netdev_info(tp->dev, "EEE is %s\n",
1665                                     tp->setlpicnt ? "enabled" : "disabled");
1666
1667                 tg3_ump_link_report(tp);
1668         }
1669 }
1670
1671 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1672 {
1673         u16 miireg;
1674
1675         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1676                 miireg = ADVERTISE_PAUSE_CAP;
1677         else if (flow_ctrl & FLOW_CTRL_TX)
1678                 miireg = ADVERTISE_PAUSE_ASYM;
1679         else if (flow_ctrl & FLOW_CTRL_RX)
1680                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1681         else
1682                 miireg = 0;
1683
1684         return miireg;
1685 }
1686
1687 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1688 {
1689         u16 miireg;
1690
1691         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1692                 miireg = ADVERTISE_1000XPAUSE;
1693         else if (flow_ctrl & FLOW_CTRL_TX)
1694                 miireg = ADVERTISE_1000XPSE_ASYM;
1695         else if (flow_ctrl & FLOW_CTRL_RX)
1696                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1697         else
1698                 miireg = 0;
1699
1700         return miireg;
1701 }
1702
1703 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1704 {
1705         u8 cap = 0;
1706
1707         if (lcladv & ADVERTISE_1000XPAUSE) {
1708                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1709                         if (rmtadv & LPA_1000XPAUSE)
1710                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1711                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1712                                 cap = FLOW_CTRL_RX;
1713                 } else {
1714                         if (rmtadv & LPA_1000XPAUSE)
1715                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1716                 }
1717         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1718                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1719                         cap = FLOW_CTRL_TX;
1720         }
1721
1722         return cap;
1723 }
1724
/* Resolve the active flow control configuration from the local and
 * remote pause advertisements (or the forced setting when autoneg is
 * off) and program the MAC RX/TX mode registers to match.
 *
 * @lcladv: locally advertised pause bits (1000X or MII layout)
 * @rmtadv: link partner's advertised pause bits
 *
 * The mode registers are rewritten (via tw32_f) only when the value
 * actually changed from the cached copy in tp->rx_mode/tp->tx_mode.
 */
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	/* With phylib, the autoneg state lives in the PHY device. */
	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		/* Serdes links use the 1000BASE-X pause bit layout,
		 * copper links the standard MII one.
		 */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}
1763
/* phylib link-change callback: mirror the PHY's current speed, duplex
 * and pause state into the MAC mode registers and update the cached
 * link configuration.  The hardware/state update runs under tp->lock;
 * the console link report happens after the lock is dropped.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Start from the cached mode with port-mode/duplex bits cleared. */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Pick the MAC port mode; note the 5785 gets MII even
		 * for speeds other than 10/100 (except gigabit).
		 */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: build the pause advertisement
			 * words for flow control resolution below.
			 */
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only touch the MAC mode register when something changed. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* Half-duplex gigabit needs a longer slot time (0xff vs 32). */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report if link state, speed, duplex or flow control changed. */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
1847
/* Attach the net device to its PHY through phylib and restrict the
 * PHY's advertised feature set to what this MAC supports.  Idempotent:
 * returns 0 immediately if already connected.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only hardware: fall through to the MII mask. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unsupported PHY interface: undo the attach. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
1895
1896 static void tg3_phy_start(struct tg3 *tp)
1897 {
1898         struct phy_device *phydev;
1899
1900         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1901                 return;
1902
1903         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1904
1905         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1906                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1907                 phydev->speed = tp->link_config.orig_speed;
1908                 phydev->duplex = tp->link_config.orig_duplex;
1909                 phydev->autoneg = tp->link_config.orig_autoneg;
1910                 phydev->advertising = tp->link_config.orig_advertising;
1911         }
1912
1913         phy_start(phydev);
1914
1915         phy_start_aneg(phydev);
1916 }
1917
1918 static void tg3_phy_stop(struct tg3 *tp)
1919 {
1920         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1921                 return;
1922
1923         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1924 }
1925
1926 static void tg3_phy_fini(struct tg3 *tp)
1927 {
1928         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1929                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1930                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1931         }
1932 }
1933
1934 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1935 {
1936         int err;
1937         u32 val;
1938
1939         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1940                 return 0;
1941
1942         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1943                 /* Cannot do read-modify-write on 5401 */
1944                 err = tg3_phy_auxctl_write(tp,
1945                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1946                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1947                                            0x4c20);
1948                 goto done;
1949         }
1950
1951         err = tg3_phy_auxctl_read(tp,
1952                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1953         if (err)
1954                 return err;
1955
1956         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1957         err = tg3_phy_auxctl_write(tp,
1958                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1959
1960 done:
1961         return err;
1962 }
1963
1964 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1965 {
1966         u32 phytest;
1967
1968         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1969                 u32 phy;
1970
1971                 tg3_writephy(tp, MII_TG3_FET_TEST,
1972                              phytest | MII_TG3_FET_SHADOW_EN);
1973                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1974                         if (enable)
1975                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1976                         else
1977                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1978                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1979                 }
1980                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1981         }
1982 }
1983
/* Enable or disable Auto Power Down (APD) in the PHY via the MISC
 * shadow registers.  No-op on pre-5705 chips and on 5717+ parts with
 * MII serdes; FET PHYs are handled by their own shadow layout.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* Program scratch register 5: power-saving options, keeping
	 * the DLL auto-powerdown bit set except on 5784 with APD on.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Select the APD shadow register, set the 84ms wake timer and
	 * the enable bit only when APD was requested.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
2018
/* Force automatic MDI/MDI-X crossover on or off.  Only applies to
 * 5705+ copper PHYs; FET PHYs use their shadow MISCCTRL register,
 * all others the AUXCTL MISC shadow register.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Open the shadow bank, flip the MDIX bit,
			 * then restore the original test register.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		/* Read-modify-write the AUXCTL MISC shadow register. */
		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
2059
2060 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2061 {
2062         int ret;
2063         u32 val;
2064
2065         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2066                 return;
2067
2068         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2069         if (!ret)
2070                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2071                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2072 }
2073
/* Copy factory calibration fields from the chip's one-time-
 * programmable (OTP) word into the PHY DSP coefficient registers.
 * No-op when no OTP value was captured (tp->phy_otp == 0) or the
 * SMDSP clock cannot be enabled.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* The DSP registers are only writable with the SMDSP clock on. */
	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	/* Each write below extracts one packed OTP field and stores it
	 * in the corresponding DSP register.
	 */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn the SMDSP clock back off. */
	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
2110
/* Update Energy Efficient Ethernet state after a link change.  On an
 * autonegotiated full-duplex 100/1000 link where the partner resolved
 * EEE, set the LPI exit timer and arm tp->setlpicnt; otherwise clear
 * the DSP TAP26 register and disable LPI in the CPMU.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit timer: 16.5us at gigabit, 36us at 100Mb. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Check the clause-45 EEE resolution status register. */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE not resolved: clear TAP26 and turn off LPI. */
		if (current_link_up == 1 &&
		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2153
2154 static void tg3_phy_eee_enable(struct tg3 *tp)
2155 {
2156         u32 val;
2157
2158         if (tp->link_config.active_speed == SPEED_1000 &&
2159             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2160              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2161              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
2162             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2163                 val = MII_TG3_DSP_TAP26_ALNOKO |
2164                       MII_TG3_DSP_TAP26_RMRXSTO;
2165                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2166                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2167         }
2168
2169         val = tr32(TG3_CPMU_EEE_MODE);
2170         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2171 }
2172
2173 static int tg3_wait_macro_done(struct tg3 *tp)
2174 {
2175         int limit = 100;
2176
2177         while (limit--) {
2178                 u32 tmp32;
2179
2180                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2181                         if ((tmp32 & 0x1000) == 0)
2182                                 break;
2183                 }
2184         }
2185         if (limit < 0)
2186                 return -EBUSY;
2187
2188         return 0;
2189 }
2190
/* Write a fixed test pattern into each of the four PHY DSP channels,
 * replay it through the macro engine, read it back and compare.  On a
 * macro-engine timeout *resetp is set so the caller can retry after a
 * fresh PHY reset.  Returns 0 when every channel verifies, -EBUSY
 * otherwise.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Load the six pattern words into this channel. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Kick the macro engine and wait for it to finish. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Switch the channel to read-back mode. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back low/high word pairs and compare against
		 * the pattern (low masked to 15 bits, high to 4).
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: poke the recovery sequence
				 * before reporting failure.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2256
2257 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2258 {
2259         int chan;
2260
2261         for (chan = 0; chan < 4; chan++) {
2262                 int i;
2263
2264                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2265                              (chan * 0x2000) | 0x0200);
2266                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2267                 for (i = 0; i < 6; i++)
2268                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2269                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2270                 if (tg3_wait_macro_done(tp))
2271                         return -EBUSY;
2272         }
2273
2274         return 0;
2275 }
2276
2277 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2278 {
2279         u32 reg32, phy9_orig;
2280         int retries, do_phy_reset, err;
2281
2282         retries = 10;
2283         do_phy_reset = 1;
2284         do {
2285                 if (do_phy_reset) {
2286                         err = tg3_bmcr_reset(tp);
2287                         if (err)
2288                                 return err;
2289                         do_phy_reset = 0;
2290                 }
2291
2292                 /* Disable transmitter and interrupt.  */
2293                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2294                         continue;
2295
2296                 reg32 |= 0x3000;
2297                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2298
2299                 /* Set full-duplex, 1000 mbps.  */
2300                 tg3_writephy(tp, MII_BMCR,
2301                              BMCR_FULLDPLX | BMCR_SPEED1000);
2302
2303                 /* Set to master mode.  */
2304                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2305                         continue;
2306
2307                 tg3_writephy(tp, MII_CTRL1000,
2308                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2309
2310                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2311                 if (err)
2312                         return err;
2313
2314                 /* Block the PHY control access.  */
2315                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2316
2317                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2318                 if (!err)
2319                         break;
2320         } while (--retries);
2321
2322         err = tg3_phy_reset_chanpat(tp);
2323         if (err)
2324                 return err;
2325
2326         tg3_phydsp_write(tp, 0x8005, 0x0000);
2327
2328         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2329         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2330
2331         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2332
2333         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2334
2335         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2336                 reg32 &= ~0x3000;
2337                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2338         } else if (!err)
2339                 err = -EBUSY;
2340
2341         return err;
2342 }
2343
/* Fully reset the tigon3 PHY and re-apply all chip-specific PHY
 * workarounds afterwards (DSP fixups, APD, jumbo-frame bits, MDI-X,
 * wirespeed).  Reports a link-down event if the device was running
 * with carrier up.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Take the internal EPHY out of IDDQ (low-power) mode. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice; fail if either access errors out. */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		/* These chips need the DSP test-pattern reset dance. */
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		/* Temporarily clear the 10MB RX-only bit around the
		 * BMCR reset; restored below.
		 */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		/* Restore the saved CPMU control value. */
		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* Clear the 12.5MHz gigabit MAC clock selection if set. */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* 5717+ MII serdes parts skip the remaining copper fixups. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Per-PHY-bug DSP fixups, gated by the phy_flags set at probe. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2484
2485 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2486 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2487 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2488                                           TG3_GPIO_MSG_NEED_VAUX)
2489 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2490         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2491          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2492          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2493          (TG3_GPIO_MSG_DRVR_PRES << 12))
2494
2495 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2496         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2497          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2498          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2499          (TG3_GPIO_MSG_NEED_VAUX << 12))
2500
/* Update this PCI function's nibble in the shared GPIO status word
 * (driver-present / need-Vaux bits) and return the resulting status
 * for all functions.
 *
 * 5717/5719 keep the word in APE shared memory (TG3_APE_GPIO_MSG);
 * other chips use the TG3_CPMU_DRV_STATUS register.  Callers in this
 * file hold TG3_APE_LOCK_GPIO around the call to serialize against
 * the other PCI functions.
 *
 * Returns the new status shifted down so function 0's bits start at
 * bit 0 (one TG3_GPIO_MSG_* nibble per function).
 */
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
	u32 status, shift;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
	else
		status = tr32(TG3_CPMU_DRV_STATUS);

	/* Each PCI function owns a 4-bit field within the status word. */
	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
	status &= ~(TG3_GPIO_MSG_MASK << shift);
	status |= (newstat << shift);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
	else
		tw32(TG3_CPMU_DRV_STATUS, status);

	return status >> TG3_APE_GPIO_MSG_SHIFT;
}
2523
/* Switch the board power source back to Vmain (normal operating
 * power).  Only meaningful on NIC (non-LOM) designs; a no-op
 * returning 0 otherwise.
 *
 * On 5717/5719/5720 the GPIO pins are shared between PCI functions,
 * so the APE GPIO lock is taken and this function's driver-present
 * bit is published before GRC_LOCAL_CTRL is rewritten.
 *
 * Returns 0 on success, -EIO if the APE GPIO lock cannot be taken.
 */
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return 0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
			return -EIO;

		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
	} else {
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	}

	return 0;
}
2548
/* Remain on Vmain while going down: drive GPIO1 as an output and
 * toggle it high, low, then high again, waiting the power-switch
 * settling delay between writes.
 *
 * No-op on LOM designs and on 5700/5701 (NOTE(review): presumably
 * because those chips' GPIOs are wired differently -- see the
 * 5700/5701 branch of tg3_pwrsrc_switch_to_vaux()).
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
	u32 grc_local_ctrl;

	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
		return;

	/* Enable GPIO1 as an output for the whole sequence. */
	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);

	tw32_wait_f(GRC_LOCAL_CTRL,
		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
		    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2572
/* Switch the board power source to auxiliary power (Vaux), used when
 * the device must stay partially powered (e.g. for WOL/ASF/APE; see
 * callers).  Only NIC (non-LOM) designs are touched.  The exact
 * GRC_LOCAL_CTRL GPIO sequence depends on the chip generation and
 * board wiring; each write is followed by the power-switch settling
 * delay.
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
	if (!tg3_flag(tp, IS_NIC))
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* 5700/5701: single write setting GPIO0/1 as outputs. */
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
			    (GRC_LCLCTRL_GPIO_OE0 |
			     GRC_LCLCTRL_GPIO_OE1 |
			     GRC_LCLCTRL_GPIO_OE2 |
			     GRC_LCLCTRL_GPIO_OUTPUT0 |
			     GRC_LCLCTRL_GPIO_OUTPUT1),
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1 |
				     tp->grc_local_ctrl;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);
	} else {
		u32 no_gpio2;
		u32 grc_local_ctrl = 0;

		/* Workaround to prevent overdrawing Amps. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}

		/* On 5753 and variants, GPIO2 cannot be used. */
		no_gpio2 = tp->nic_sram_data_cfg &
			   NIC_SRAM_DATA_CFG_NO_GPIO2;

		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
				  GRC_LCLCTRL_GPIO_OE1 |
				  GRC_LCLCTRL_GPIO_OE2 |
				  GRC_LCLCTRL_GPIO_OUTPUT1 |
				  GRC_LCLCTRL_GPIO_OUTPUT2;
		if (no_gpio2) {
			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
					    GRC_LCLCTRL_GPIO_OUTPUT2);
		}
		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Raise GPIO0 in a separate, later write. */
		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

		tw32_wait_f(GRC_LOCAL_CTRL,
			    tp->grc_local_ctrl | grc_local_ctrl,
			    TG3_GRC_LCLCTL_PWRSW_DELAY);

		/* Finally drop GPIO2 if it is usable on this board. */
		if (!no_gpio2) {
			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL,
				    tp->grc_local_ctrl | grc_local_ctrl,
				    TG3_GRC_LCLCTL_PWRSW_DELAY);
		}
	}
}
2649
/* Power-source arbitration for 5717-class chips (shared GPIOs).
 *
 * Publishes this function's Vaux requirement (set when ASF, APE, or
 * @wol_enable demands it) in the shared status word, then -- only if
 * no other function still has its driver-present bit set -- switches
 * the board to Vaux or Vmain accordingly.  The whole transition is
 * serialized with the APE GPIO lock; if the lock cannot be taken we
 * silently return without changing anything.
 */
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
	u32 msg = 0;

	/* Serialize power state transitions */
	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
		return;

	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
		msg = TG3_GPIO_MSG_NEED_VAUX;

	msg = tg3_set_function_status(tp, msg);

	/* Another function's driver is present; it owns the decision. */
	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
		goto done;

	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);

done:
	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
2674
/* Decide whether the board should run from Vaux or Vmain as this
 * device goes down.
 *
 * @include_wol: count Wake-on-LAN as a reason to request Vaux.
 *
 * On dual-port devices the peer function is consulted: if its driver
 * is still fully initialized (INIT_COMPLETE) it owns the power-source
 * decision and we do nothing; otherwise its WOL/ASF needs are folded
 * into ours.  5717/5719/5720 use the shared-GPIO arbitration path
 * instead, and 57765 GPIOs are left alone entirely.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		tg3_frob_aux_power_5717(tp, include_wol ?
					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
		return;
	}

	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
	    tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux)
		tg3_pwrsrc_switch_to_vaux(tp);
	else
		tg3_pwrsrc_die_with_vmain(tp);
}
2719
2720 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2721 {
2722         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2723                 return 1;
2724         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2725                 if (speed != SPEED_10)
2726                         return 1;
2727         } else if (speed == SPEED_10)
2728                 return 1;
2729
2730         return 0;
2731 }
2732
2733 static int tg3_setup_phy(struct tg3 *, int);
2734 static int tg3_halt_cpu(struct tg3 *, u32);
2735
/* Power down (or prepare for low power) the PHY before the device
 * sleeps.
 *
 * @do_low_power: additionally program the auxctl power-control shadow
 *	register for low-power operation (copper, non-FET PHYs only).
 *
 * Chip-specific paths: 5704 serdes, 5906 internal ephy, and FET-style
 * PHYs each get dedicated handling and return early.  On a few buggy
 * chips the PHY must not be powered down at all, so we return before
 * the final BMCR_PDOWN write.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		/* 5704 serdes: force HW autoneg + soft reset and set
		 * bit 15 of the serdes config register.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the PHY, then put the internal ephy into
		 * IDDQ mode via GRC_MISC_CFG.
		 */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			/* Open shadow register access to set the
			 * standby power-down bit in AUXMODE4, then
			 * restore the original FET_TEST value.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		/* 5784/5761 AX steppings: drop the 1000Mb MAC clock to
		 * 12.5MHz before powering down the PHY.
		 */
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2808
/* tp->lock is held.
 * Acquire the NVRAM software arbitration semaphore (SWARB request
 * bit 1), polling up to 8000 * 20us (~160ms) for the grant.  The lock
 * is recursive via nvram_lock_cnt; only the first acquisition touches
 * hardware.  No-op (returns 0) on devices without NVRAM.
 *
 * Returns 0 on success, -ENODEV if the grant never arrives (the
 * request is withdrawn in that case).
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
	if (tg3_flag(tp, NVRAM)) {
		int i;

		if (tp->nvram_lock_cnt == 0) {
			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
			for (i = 0; i < 8000; i++) {
				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
					break;
				udelay(20);
			}
			if (i == 8000) {
				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
				return -ENODEV;
			}
		}
		tp->nvram_lock_cnt++;
	}
	return 0;
}
2831
2832 /* tp->lock is held. */
2833 static void tg3_nvram_unlock(struct tg3 *tp)
2834 {
2835         if (tg3_flag(tp, NVRAM)) {
2836                 if (tp->nvram_lock_cnt > 0)
2837                         tp->nvram_lock_cnt--;
2838                 if (tp->nvram_lock_cnt == 0)
2839                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2840         }
2841 }
2842
2843 /* tp->lock is held. */
2844 static void tg3_enable_nvram_access(struct tg3 *tp)
2845 {
2846         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2847                 u32 nvaccess = tr32(NVRAM_ACCESS);
2848
2849                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2850         }
2851 }
2852
2853 /* tp->lock is held. */
2854 static void tg3_disable_nvram_access(struct tg3 *tp)
2855 {
2856         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2857                 u32 nvaccess = tr32(NVRAM_ACCESS);
2858
2859                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2860         }
2861 }
2862
/* Read one 32-bit word from the legacy EEPROM via the GRC EEPROM
 * address/data registers (used when the NVRAM flag is unset).
 *
 * @offset must be word-aligned and within EEPROM_ADDR_ADDR_MASK.
 * Completion is polled in 1ms sleeps for up to 1000 iterations.
 *
 * Returns 0 on success, -EINVAL on a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve unrelated bits; clear address, device id and the
	 * read-direction bit before programming the new transaction.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
2902
2903 #define NVRAM_CMD_TIMEOUT 10000
2904
/* Issue @nvram_cmd to the NVRAM controller and poll for completion
 * (10us steps, up to NVRAM_CMD_TIMEOUT iterations).  A second 10us
 * settle delay follows the DONE bit.
 *
 * Returns 0 on completion, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		udelay(10);
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}

	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
2923
2924 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2925 {
2926         if (tg3_flag(tp, NVRAM) &&
2927             tg3_flag(tp, NVRAM_BUFFERED) &&
2928             tg3_flag(tp, FLASH) &&
2929             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2930             (tp->nvram_jedecnum == JEDEC_ATMEL))
2931
2932                 addr = ((addr / tp->nvram_pagesize) <<
2933                         ATMEL_AT45DB0X1B_PAGE_POS) +
2934                        (addr % tp->nvram_pagesize);
2935
2936         return addr;
2937 }
2938
2939 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2940 {
2941         if (tg3_flag(tp, NVRAM) &&
2942             tg3_flag(tp, NVRAM_BUFFERED) &&
2943             tg3_flag(tp, FLASH) &&
2944             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2945             (tp->nvram_jedecnum == JEDEC_ATMEL))
2946
2947                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2948                         tp->nvram_pagesize) +
2949                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2950
2951         return addr;
2952 }
2953
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
/* Read one 32-bit word of NVRAM at byte @offset.
 *
 * Devices without the NVRAM flag fall back to the legacy EEPROM
 * interface.  Otherwise the offset is translated for Atmel
 * page-addressed parts, the NVRAM arbitration lock is taken, access
 * is enabled, and a single read command is issued; lock and access
 * are released before returning.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
2991
2992 /* Ensures NVRAM data is in bytestream format. */
2993 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2994 {
2995         u32 v;
2996         int res = tg3_nvram_read(tp, offset, &v);
2997         if (!res)
2998                 *val = cpu_to_be32(v);
2999         return res;
3000 }
3001
3002 #define RX_CPU_SCRATCH_BASE     0x30000
3003 #define RX_CPU_SCRATCH_SIZE     0x04000
3004 #define TX_CPU_SCRATCH_BASE     0x34000
3005 #define TX_CPU_SCRATCH_SIZE     0x04000
3006
/* tp->lock is held.
 * Halt the embedded RX or TX CPU (@offset is RX_CPU_BASE or
 * TX_CPU_BASE).  5705+ devices have no TX CPU (enforced by the
 * BUG_ON), and 5906 has a single VCPU halted via GRC_VCPU_EXT_CTRL
 * instead.  The halt request is retried up to 10000 times; the RX CPU
 * additionally gets a final forced halt write plus a settle delay.
 *
 * Returns 0 on success, -ENODEV if the CPU never reports halted.
 * On success the firmware's NVRAM arbitration request is cleared.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
	int i;

	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val = tr32(GRC_VCPU_EXT_CTRL);

		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
		return 0;
	}
	if (offset == RX_CPU_BASE) {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}

		/* Force one more halt write regardless of the poll. */
		tw32(offset + CPU_STATE, 0xffffffff);
		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
		udelay(10);
	} else {
		for (i = 0; i < 10000; i++) {
			tw32(offset + CPU_STATE, 0xffffffff);
			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
				break;
		}
	}

	if (i >= 10000) {
		netdev_err(tp->dev, "%s timed out, %s CPU\n",
			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
		return -ENODEV;
	}

	/* Clear firmware's nvram arbitration. */
	if (tg3_flag(tp, NVRAM))
		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
	return 0;
}
3051
3052 struct fw_info {
3053         unsigned int fw_base;
3054         unsigned int fw_len;
3055         const __be32 *fw_data;
3056 };
3057
/* tp->lock is held.
 * Copy a firmware image into an embedded CPU's scratch memory and
 * leave the CPU halted; the caller starts it by programming CPU_PC
 * and clearing CPU_MODE.
 *
 * @cpu_base: CPU register base; TX_CPU_BASE is rejected on 5705+
 *	(those devices have no TX CPU).
 * @cpu_scratch_base/@cpu_scratch_size: scratch window, zeroed first.
 * @info: firmware base address, byte length, and big-endian words.
 *
 * The NVRAM lock is taken around the halt because bootcode may still
 * be loading from NVRAM at this point.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
				 u32 cpu_scratch_base, int cpu_scratch_size,
				 struct fw_info *info)
{
	int err, lock_err, i;
	void (*write_op)(struct tg3 *, u32, u32);

	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
		netdev_err(tp->dev,
			   "%s: Trying to load TX cpu firmware which is 5705\n",
			   __func__);
		return -EINVAL;
	}

	/* 5705+ scratch is reached via memory writes, older chips via
	 * indirect register writes.
	 */
	if (tg3_flag(tp, 5705_PLUS))
		write_op = tg3_write_mem;
	else
		write_op = tg3_write_indirect_reg32;

	/* It is possible that bootcode is still loading at this point.
	 * Get the nvram lock first before halting the cpu.
	 */
	lock_err = tg3_nvram_lock(tp);
	err = tg3_halt_cpu(tp, cpu_base);
	if (!lock_err)
		tg3_nvram_unlock(tp);
	if (err)
		goto out;

	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
		write_op(tp, cpu_scratch_base + i, 0);
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
	/* Copy the image, word by word, offset by the low 16 bits of
	 * the firmware's load address within the scratch window.
	 */
	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
		write_op(tp, (cpu_scratch_base +
			      (info->fw_base & 0xffff) +
			      (i * sizeof(u32))),
			      be32_to_cpu(info->fw_data[i]));

	err = 0;

out:
	return err;
}
3103
/* tp->lock is held.
 * Load the 5701 A0 workaround firmware (tp->fw) into both the RX and
 * TX CPUs, then start only the RX CPU and verify its program counter
 * latched the firmware entry point (up to 5 retries, 1ms apart).
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* PC verified: release the CPU from halt. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
3158
/* tp->lock is held.
 * Load the software TSO firmware (tp->fw) and start the CPU that
 * runs it: the RX CPU (using mbuf-pool SRAM as scratch) on 5705,
 * the TX CPU otherwise.  A no-op (returns 0) on devices with any
 * hardware TSO variant.  The started CPU's program counter is
 * verified with up to 5 retries, 1ms apart.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;
	info.fw_data = &fw_data[3];

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* PC verified: release the CPU from halt. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
3222
3223
/* tp->lock is held.
 * Program the station MAC address (tp->dev->dev_addr) into hardware:
 * all four MAC_ADDR_* high/low register pairs (slot 1 optionally
 * skipped via @skip_mac_1 -- NOTE(review): presumably left for
 * firmware/WOL use; confirm against callers), on 5703/5704 also the
 * twelve MAC_EXTADDR_* pairs, and finally the TX backoff seed derived
 * from the byte sum of the address.
 */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	/* High half holds bytes 0-1, low half bytes 2-5. */
	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed the TX backoff algorithm with the address byte sum. */
	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
3260
/* Re-post the cached MISC_HOST_CTRL value to PCI config space.
 * Called at the start of the power-up/power-down paths (see
 * tg3_power_up() and tg3_power_down_prepare()).
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
3270
3271 static int tg3_power_up(struct tg3 *tp)
3272 {
3273         int err;
3274
3275         tg3_enable_register_access(tp);
3276
3277         err = pci_set_power_state(tp->pdev, PCI_D0);
3278         if (!err) {
3279                 /* Switch out of Vaux if it is a NIC */
3280                 tg3_pwrsrc_switch_to_vmain(tp);
3281         } else {
3282                 netdev_err(tp->dev, "Transition to D0 failed\n");
3283         }
3284
3285         return err;
3286 }
3287
/* Prepare the chip for a low-power state: restore CLKREQ, mask PCI
 * interrupt delivery, save the current link configuration, drop the PHY
 * to a low speed, program the firmware WOL mailbox and, when wake-up is
 * armed, configure the MAC to pass magic packets.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask PCI interrupt delivery while the device is powered down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	/* Wake the system only if both the platform and the driver's
	 * WOL flag allow it.
	 */
	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* phylib-managed PHY: save the current link parameters in
		 * link_config.orig_* so tg3_power_up() can restore them,
		 * then renegotiate at 10 (or 10/100 for WOL) speeds only.
		 */
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Certain Broadcom PHY families still need the
			 * driver-side low-power sequence below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		/* Legacy (non-phylib) path: save link state once, then
		 * force 10/half autoneg unless this is a SerDes device.
		 */
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: set the VCPU's DISABLE_WOL control bit. */
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* No ASF firmware: poll up to ~200ms for the firmware
		 * status mailbox to report the MAGIC1 complement.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	/* Tell the firmware (via shared SRAM) that we are shutting down
	 * with WOL/magic-packet enabled.
	 */
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		/* Keep the MAC/PHY alive enough to receive the wake-up
		 * frame: pick the port mode and enable magic packets.
		 */
		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				/* 5700 needs link polarity fixed up per the
				 * WOL link speed.
				 */
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Per-ASIC clock gating for the powered-down state. */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Two staged writes with 40us settle time each —
		 * presumably required by the clock switch sequence;
		 * do not collapse into one write.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Fully power down the PHY only when nothing (WOL or ASF
	 * firmware) still needs it.
	 */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			/* Halting the RX CPU requires holding the NVRAM
			 * lock; only unlock if the lock was acquired.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	/* Let the firmware know the driver is going away. */
	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
3533
/* Put the device into D3hot: run the shutdown preparation sequence,
 * arm PCI wake-up if WOL is enabled, then drop the power state.
 * The order matters — the chip must be prepared before leaving D0.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3541
3542 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3543 {
3544         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3545         case MII_TG3_AUX_STAT_10HALF:
3546                 *speed = SPEED_10;
3547                 *duplex = DUPLEX_HALF;
3548                 break;
3549
3550         case MII_TG3_AUX_STAT_10FULL:
3551                 *speed = SPEED_10;
3552                 *duplex = DUPLEX_FULL;
3553                 break;
3554
3555         case MII_TG3_AUX_STAT_100HALF:
3556                 *speed = SPEED_100;
3557                 *duplex = DUPLEX_HALF;
3558                 break;
3559
3560         case MII_TG3_AUX_STAT_100FULL:
3561                 *speed = SPEED_100;
3562                 *duplex = DUPLEX_FULL;
3563                 break;
3564
3565         case MII_TG3_AUX_STAT_1000HALF:
3566                 *speed = SPEED_1000;
3567                 *duplex = DUPLEX_HALF;
3568                 break;
3569
3570         case MII_TG3_AUX_STAT_1000FULL:
3571                 *speed = SPEED_1000;
3572                 *duplex = DUPLEX_FULL;
3573                 break;
3574
3575         default:
3576                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3577                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3578                                  SPEED_10;
3579                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3580                                   DUPLEX_HALF;
3581                         break;
3582                 }
3583                 *speed = SPEED_INVALID;
3584                 *duplex = DUPLEX_INVALID;
3585                 break;
3586         }
3587 }
3588
/* Program the PHY autonegotiation advertisement registers from the
 * ethtool-style @advertise mask and @flowctrl pause flags:
 * MII_ADVERTISE for 10/100, MII_CTRL1000 for gigabit, and — when the
 * PHY is EEE-capable — the clause-45 EEE advertisement plus the
 * per-ASIC DSP fixups.  Returns 0 on success or a PHY-write error.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	/* Build the 10/100 advertisement word. */
	new_adv = ADVERTISE_CSMA;
	if (advertise & ADVERTISED_10baseT_Half)
		new_adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		new_adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		new_adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		new_adv |= ADVERTISE_100FULL;

	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
		goto done;

	/* Build and write the gigabit advertisement word. */
	new_adv = 0;
	if (advertise & ADVERTISED_1000baseT_Half)
		new_adv |= ADVERTISE_1000HALF;
	if (advertise & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000FULL;

	/* Early 5701 revisions force master mode — chip workaround. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
		new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

	err = tg3_writephy(tp, MII_CTRL1000, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* EEE setup: disable LPI in the CPMU before touching the PHY. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Always disable SMDSP access again; keep the first
		 * error if both the write and the disable failed.
		 */
		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
3673
/* Kick off copper link bring-up according to tp->link_config:
 * pick the advertisement set (low-power WOL, full autoneg, or one
 * forced mode), program it, then either force BMCR or restart
 * autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Low-power/WOL: advertise only 10 (and 100 when the
		 * WOL_SPEED_100MB flag is set), with both pause bits.
		 */
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No forced speed: advertise the configured set, minus
		 * gigabit on 10/100-only PHYs.
		 */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		/* Forced mode: build the BMCR value by hand. */
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link via loopback and wait (up to
			 * 15ms) for link-down before writing the new
			 * BMCR value.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				/* Double read: BMSR link status is
				 * latched; the second read reflects the
				 * current state.
				 */
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: (re)start negotiation. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
3767
3768 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3769 {
3770         int err;
3771
3772         /* Turn off tap power management. */
3773         /* Set Extended packet length bit */
3774         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3775
3776         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3777         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3778         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3779         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3780         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3781
3782         udelay(40);
3783
3784         return err;
3785 }
3786
3787 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3788 {
3789         u32 adv_reg, all_mask = 0;
3790
3791         if (mask & ADVERTISED_10baseT_Half)
3792                 all_mask |= ADVERTISE_10HALF;
3793         if (mask & ADVERTISED_10baseT_Full)
3794                 all_mask |= ADVERTISE_10FULL;
3795         if (mask & ADVERTISED_100baseT_Half)
3796                 all_mask |= ADVERTISE_100HALF;
3797         if (mask & ADVERTISED_100baseT_Full)
3798                 all_mask |= ADVERTISE_100FULL;
3799
3800         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3801                 return 0;
3802
3803         if ((adv_reg & ADVERTISE_ALL) != all_mask)
3804                 return 0;
3805
3806         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3807                 u32 tg3_ctrl;
3808
3809                 all_mask = 0;
3810                 if (mask & ADVERTISED_1000baseT_Half)
3811                         all_mask |= ADVERTISE_1000HALF;
3812                 if (mask & ADVERTISED_1000baseT_Full)
3813                         all_mask |= ADVERTISE_1000FULL;
3814
3815                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3816                         return 0;
3817
3818                 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3819                 if (tg3_ctrl != all_mask)
3820                         return 0;
3821         }
3822
3823         return 1;
3824 }
3825
3826 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3827 {
3828         u32 curadv, reqadv;
3829
3830         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3831                 return 1;
3832
3833         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3834         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3835
3836         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3837                 if (curadv != reqadv)
3838                         return 0;
3839
3840                 if (tg3_flag(tp, PAUSE_AUTONEG))
3841                         tg3_readphy(tp, MII_LPA, rmtadv);
3842         } else {
3843                 /* Reprogram the advertisement register, even if it
3844                  * does not affect the current link.  If the link
3845                  * gets renegotiated in the future, we can save an
3846                  * additional renegotiation cycle by advertising
3847                  * it correctly in the first place.
3848                  */
3849                 if (curadv != reqadv) {
3850                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3851                                      ADVERTISE_PAUSE_ASYM);
3852                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3853                 }
3854         }
3855
3856         return 1;
3857 }
3858
3859 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3860 {
3861         int current_link_up;
3862         u32 bmsr, val;
3863         u32 lcl_adv, rmt_adv;
3864         u16 current_speed;
3865         u8 current_duplex;
3866         int i, err;
3867
3868         tw32(MAC_EVENT, 0);
3869
3870         tw32_f(MAC_STATUS,
3871              (MAC_STATUS_SYNC_CHANGED |
3872               MAC_STATUS_CFG_CHANGED |
3873               MAC_STATUS_MI_COMPLETION |
3874               MAC_STATUS_LNKSTATE_CHANGED));
3875         udelay(40);
3876
3877         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3878                 tw32_f(MAC_MI_MODE,
3879                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3880                 udelay(80);
3881         }
3882
3883         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3884
3885         /* Some third-party PHYs need to be reset on link going
3886          * down.
3887          */
3888         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3889              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3890              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3891             netif_carrier_ok(tp->dev)) {
3892                 tg3_readphy(tp, MII_BMSR, &bmsr);
3893                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3894                     !(bmsr & BMSR_LSTATUS))
3895                         force_reset = 1;
3896         }
3897         if (force_reset)
3898                 tg3_phy_reset(tp);
3899
3900         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3901                 tg3_readphy(tp, MII_BMSR, &bmsr);
3902                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3903                     !tg3_flag(tp, INIT_COMPLETE))
3904                         bmsr = 0;
3905
3906                 if (!(bmsr & BMSR_LSTATUS)) {
3907                         err = tg3_init_5401phy_dsp(tp);
3908                         if (err)
3909                                 return err;
3910
3911                         tg3_readphy(tp, MII_BMSR, &bmsr);
3912                         for (i = 0; i < 1000; i++) {
3913                                 udelay(10);
3914                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3915                                     (bmsr & BMSR_LSTATUS)) {
3916                                         udelay(40);
3917                                         break;
3918                                 }
3919                         }
3920
3921                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3922                             TG3_PHY_REV_BCM5401_B0 &&
3923                             !(bmsr & BMSR_LSTATUS) &&
3924                             tp->link_config.active_speed == SPEED_1000) {
3925                                 err = tg3_phy_reset(tp);
3926                                 if (!err)
3927                                         err = tg3_init_5401phy_dsp(tp);
3928                                 if (err)
3929                                         return err;
3930                         }
3931                 }
3932         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3933                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3934                 /* 5701 {A0,B0} CRC bug workaround */
3935                 tg3_writephy(tp, 0x15, 0x0a75);
3936                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3937                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3938                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3939         }
3940
3941         /* Clear pending interrupts... */
3942         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3943         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3944
3945         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3946                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3947         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3948                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3949
3950         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3951             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3952                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3953                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3954                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3955                 else
3956                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3957         }
3958
3959         current_link_up = 0;
3960         current_speed = SPEED_INVALID;
3961         current_duplex = DUPLEX_INVALID;
3962
3963         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3964                 err = tg3_phy_auxctl_read(tp,
3965                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3966                                           &val);
3967                 if (!err && !(val & (1 << 10))) {
3968                         tg3_phy_auxctl_write(tp,
3969                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3970                                              val | (1 << 10));
3971                         goto relink;
3972                 }
3973         }
3974
3975         bmsr = 0;
3976         for (i = 0; i < 100; i++) {
3977                 tg3_readphy(tp, MII_BMSR, &bmsr);
3978                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3979                     (bmsr & BMSR_LSTATUS))
3980                         break;
3981                 udelay(40);
3982         }
3983
3984         if (bmsr & BMSR_LSTATUS) {
3985                 u32 aux_stat, bmcr;
3986
3987                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3988                 for (i = 0; i < 2000; i++) {
3989                         udelay(10);
3990                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3991                             aux_stat)
3992                                 break;
3993                 }
3994
3995                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3996                                              &current_speed,
3997                                              &current_duplex);
3998
3999                 bmcr = 0;
4000                 for (i = 0; i < 200; i++) {
4001                         tg3_readphy(tp, MII_BMCR, &bmcr);
4002                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4003                                 continue;
4004                         if (bmcr && bmcr != 0x7fff)
4005                                 break;
4006                         udelay(10);
4007                 }
4008
4009                 lcl_adv = 0;
4010                 rmt_adv = 0;
4011
4012                 tp->link_config.active_speed = current_speed;
4013                 tp->link_config.active_duplex = current_duplex;
4014
4015                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4016                         if ((bmcr & BMCR_ANENABLE) &&
4017                             tg3_copper_is_advertising_all(tp,
4018                                                 tp->link_config.advertising)) {
4019                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
4020                                                                   &rmt_adv))
4021                                         current_link_up = 1;
4022                         }
4023                 } else {
4024                         if (!(bmcr & BMCR_ANENABLE) &&
4025                             tp->link_config.speed == current_speed &&
4026                             tp->link_config.duplex == current_duplex &&
4027                             tp->link_config.flowctrl ==
4028                             tp->link_config.active_flowctrl) {
4029                                 current_link_up = 1;
4030                         }
4031                 }
4032
4033                 if (current_link_up == 1 &&
4034                     tp->link_config.active_duplex == DUPLEX_FULL)
4035                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4036         }
4037
4038 relink:
4039         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4040                 tg3_phy_copper_begin(tp);
4041
4042                 tg3_readphy(tp, MII_BMSR, &bmsr);
4043                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4044                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4045                         current_link_up = 1;
4046         }
4047
4048         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4049         if (current_link_up == 1) {
4050                 if (tp->link_config.active_speed == SPEED_100 ||
4051                     tp->link_config.active_speed == SPEED_10)
4052                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4053                 else
4054                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4055         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4056                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4057         else
4058                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4059
4060         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4061         if (tp->link_config.active_duplex == DUPLEX_HALF)
4062                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4063
4064         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4065                 if (current_link_up == 1 &&
4066                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4067                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4068                 else
4069                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4070         }
4071
4072         /* ??? Without this setting Netgear GA302T PHY does not
4073          * ??? send/receive packets...
4074          */
4075         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4076             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4077                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4078                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4079                 udelay(80);
4080         }
4081
4082         tw32_f(MAC_MODE, tp->mac_mode);
4083         udelay(40);
4084
4085         tg3_phy_eee_adjust(tp, current_link_up);
4086
4087         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4088                 /* Polled via timer. */
4089                 tw32_f(MAC_EVENT, 0);
4090         } else {
4091                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4092         }
4093         udelay(40);
4094
4095         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4096             current_link_up == 1 &&
4097             tp->link_config.active_speed == SPEED_1000 &&
4098             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4099                 udelay(120);
4100                 tw32_f(MAC_STATUS,
4101                      (MAC_STATUS_SYNC_CHANGED |
4102                       MAC_STATUS_CFG_CHANGED));
4103                 udelay(40);
4104                 tg3_write_mem(tp,
4105                               NIC_SRAM_FIRMWARE_MBOX,
4106                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4107         }
4108
4109         /* Prevent send BD corruption. */
4110         if (tg3_flag(tp, CLKREQ_BUG)) {
4111                 u16 oldlnkctl, newlnkctl;
4112
4113                 pci_read_config_word(tp->pdev,
4114                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4115                                      &oldlnkctl);
4116                 if (tp->link_config.active_speed == SPEED_100 ||
4117                     tp->link_config.active_speed == SPEED_10)
4118                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4119                 else
4120                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4121                 if (newlnkctl != oldlnkctl)
4122                         pci_write_config_word(tp->pdev,
4123                                               pci_pcie_cap(tp->pdev) +
4124                                               PCI_EXP_LNKCTL, newlnkctl);
4125         }
4126
4127         if (current_link_up != netif_carrier_ok(tp->dev)) {
4128                 if (current_link_up)
4129                         netif_carrier_on(tp->dev);
4130                 else
4131                         netif_carrier_off(tp->dev);
4132                 tg3_link_report(tp);
4133         }
4134
4135         return 0;
4136 }
4137
/* Software state for the 1000BASE-X (fiber) autonegotiation state
 * machine implemented in tg3_fiber_aneg_smachine().  The MR_* flag
 * names follow the management-register terminology of IEEE 802.3
 * clause 37.
 */
struct tg3_fiber_aneginfo {
        int state;                      /* current ANEG_STATE_* value */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;                      /* MR_* control/status bits below */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Timestamps in state-machine ticks (cur_time is bumped once
         * per tg3_fiber_aneg_smachine() call).
         */
        unsigned long link_time, cur_time;

        u32 ability_match_cfg;          /* last RX config word being matched */
        int ability_match_count;        /* consecutive repeats of that word */

        /* Match booleans derived from the sampled RX config word. */
        char ability_match, idle_match, ack_match;

        u32 txconfig, rxconfig;         /* ANEG_CFG_* words sent / received */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
#define ANEG_OK         0       /* default result; keep polling */
#define ANEG_DONE       1       /* negotiation finished (or autoneg disabled) */
#define ANEG_TIMER_ENAB 2       /* caller must keep the tick timer running */
#define ANEG_FAILED     -1      /* invalid config word or unknown state */

/* Settle threshold in state-machine ticks before advancing a state. */
#define ANEG_STATE_SETTLE_TIME  10000
4201
/* Advance the software 1000BASE-X autonegotiation state machine by one
 * tick.  Each call samples the received config word from the MAC,
 * updates the ability/ack/idle match tracking in @ap, then executes one
 * step of the clause-37-style state machine, writing the MAC TX config
 * word and mode register as required.
 *
 * Returns ANEG_OK to keep polling, ANEG_TIMER_ENAB when the caller must
 * keep ticking, ANEG_DONE on completion, or ANEG_FAILED on error.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        u16 flowctrl;
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        if (ap->state == ANEG_STATE_UNKNOWN) {
                /* First tick: clear all tracking state. */
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        /* Sample the incoming config word (if any) and refresh the
         * ability/ack/idle match state.  ability_match is only set
         * once the same non-matching word has been seen repeatedly.
         */
        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch (ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                /* Start sending idle config words. */
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME)
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                else
                        ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Advertise full duplex plus the configured pause bits. */
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = ANEG_CFG_FD;
                flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
                if (flowctrl & ADVERTISE_1000XPAUSE)
                        ap->txconfig |= ANEG_CFG_PS1;
                if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                        ap->txconfig |= ANEG_CFG_PS2;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                if (ap->ability_match != 0 && ap->rxconfig != 0)
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Echo the partner's word back with ACK set. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                /* Translate the partner's config word into MR_LP_* flags. */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        /* Next-page exchange is not
                                         * implemented (see the
                                         * NEXT_PAGE_WAIT states below).
                                         */
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                /* Stop sending config words and wait for idle. */
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        }

        return ret;
}
4453
4454 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4455 {
4456         int res = 0;
4457         struct tg3_fiber_aneginfo aninfo;
4458         int status = ANEG_FAILED;
4459         unsigned int tick;
4460         u32 tmp;
4461
4462         tw32_f(MAC_TX_AUTO_NEG, 0);
4463
4464         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4465         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4466         udelay(40);
4467
4468         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4469         udelay(40);
4470
4471         memset(&aninfo, 0, sizeof(aninfo));
4472         aninfo.flags |= MR_AN_ENABLE;
4473         aninfo.state = ANEG_STATE_UNKNOWN;
4474         aninfo.cur_time = 0;
4475         tick = 0;
4476         while (++tick < 195000) {
4477                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4478                 if (status == ANEG_DONE || status == ANEG_FAILED)
4479                         break;
4480
4481                 udelay(1);
4482         }
4483
4484         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4485         tw32_f(MAC_MODE, tp->mac_mode);
4486         udelay(40);
4487
4488         *txflags = aninfo.txconfig;
4489         *rxflags = aninfo.flags;
4490
4491         if (status == ANEG_DONE &&
4492             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4493                              MR_LP_ADV_FULL_DUPLEX)))
4494                 res = 1;
4495
4496         return res;
4497 }
4498
/* Bring up the BCM8002 SERDES PHY with its vendor-specified register
 * sequence.  The raw register numbers and values below are opaque
 * vendor settings -- NOTE(review): presumably from Broadcom reference
 * code; not documented in public datasheets.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if (tg3_flag(tp, INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
4548
/* Link setup for fiber parts whose autonegotiation is run by the
 * SG_DIG hardware block.  Returns nonzero when the link is up.
 *
 * With autoneg disabled, hardware autoneg is torn down and link is
 * declared up on PCS sync alone.  Otherwise the expected SG_DIG
 * control word (including pause advertisement) is programmed and the
 * result polled; if negotiation never completes, parallel detection
 * declares link up when PCS is synced without incoming config words.
 */
static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
{
        u16 flowctrl;
        u32 sg_dig_ctrl, sg_dig_status;
        u32 serdes_cfg, expected_sg_dig_ctrl;
        int workaround, port_a;
        int current_link_up;

        serdes_cfg = 0;
        expected_sg_dig_ctrl = 0;
        workaround = 0;
        port_a = 1;
        current_link_up = 0;

        /* Chips other than 5704 A0/A1 need the SERDES_CFG workaround
         * writes below.
         */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
                workaround = 1;
                if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
                        port_a = 0;

                /* preserve bits 0-11,13,14 for signal pre-emphasis */
                /* preserve bits 20-23 for voltage regulator */
                serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
        }

        sg_dig_ctrl = tr32(SG_DIG_CTRL);

        if (tp->link_config.autoneg != AUTONEG_ENABLE) {
                /* Forced mode: disable hardware autoneg if active,
                 * then link up on PCS sync alone.
                 */
                if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
                        if (workaround) {
                                u32 val = serdes_cfg;

                                if (port_a)
                                        val |= 0xc010000;
                                else
                                        val |= 0x4010000;
                                tw32_f(MAC_SERDES_CFG, val);
                        }

                        tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                }
                if (mac_status & MAC_STATUS_PCS_SYNCED) {
                        tg3_setup_flow_control(tp, 0, 0);
                        current_link_up = 1;
                }
                goto out;
        }

        /* Want auto-negotiation.  */
        expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;

        flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
        if (flowctrl & ADVERTISE_1000XPAUSE)
                expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
        if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;

        if (sg_dig_ctrl != expected_sg_dig_ctrl) {
                /* Hold off restarting autoneg while a previously
                 * parallel-detected link still has PCS sync and no
                 * incoming config words.
                 */
                if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
                    tp->serdes_counter &&
                    ((mac_status & (MAC_STATUS_PCS_SYNCED |
                                    MAC_STATUS_RCVD_CFG)) ==
                     MAC_STATUS_PCS_SYNCED)) {
                        tp->serdes_counter--;
                        current_link_up = 1;
                        goto out;
                }
restart_autoneg:
                if (workaround)
                        tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
                udelay(5);
                tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);

                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
                                 MAC_STATUS_SIGNAL_DET)) {
                sg_dig_status = tr32(SG_DIG_STATUS);
                mac_status = tr32(MAC_STATUS);

                if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
                    (mac_status & MAC_STATUS_PCS_SYNCED)) {
                        /* Hardware autoneg finished: derive flow control
                         * from the advertised/received pause bits.
                         */
                        u32 local_adv = 0, remote_adv = 0;

                        if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);
                        current_link_up = 1;
                        tp->serdes_counter = 0;
                        tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
                } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
                        if (tp->serdes_counter)
                                tp->serdes_counter--;
                        else {
                                /* Autoneg timed out: fall back to
                                 * parallel detection.
                                 */
                                if (workaround) {
                                        u32 val = serdes_cfg;

                                        if (port_a)
                                                val |= 0xc010000;
                                        else
                                                val |= 0x4010000;

                                        tw32_f(MAC_SERDES_CFG, val);
                                }

                                tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
                                udelay(40);

                                /* Link parallel detection - link is up */
                                /* only if we have PCS_SYNC and not */
                                /* receiving config code words */
                                mac_status = tr32(MAC_STATUS);
                                if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
                                    !(mac_status & MAC_STATUS_RCVD_CFG)) {
                                        tg3_setup_flow_control(tp, 0, 0);
                                        current_link_up = 1;
                                        tp->phy_flags |=
                                                TG3_PHYFLG_PARALLEL_DETECT;
                                        tp->serdes_counter =
                                                SERDES_PARALLEL_DET_TIMEOUT;
                                } else
                                        goto restart_autoneg;
                        }
                }
        } else {
                /* No sync and no signal: re-arm the autoneg timeout. */
                tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
                tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
        }

out:
        return current_link_up;
}
4690
/* Link setup for fiber parts without SG_DIG hardware autoneg.  Runs
 * the software clause-37 state machine (fiber_autoneg()) when
 * autonegotiation is enabled, otherwise forces a 1000FD link.
 * Returns nonzero when the link should be considered up.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
        int current_link_up = 0;

        /* Nothing to do without PCS sync. */
        if (!(mac_status & MAC_STATUS_PCS_SYNCED))
                goto out;

        if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                u32 txflags, rxflags;
                int i;

                if (fiber_autoneg(tp, &txflags, &rxflags)) {
                        /* Map the exchanged config words onto pause
                         * advertisement bits for flow control setup.
                         */
                        u32 local_adv = 0, remote_adv = 0;

                        if (txflags & ANEG_CFG_PS1)
                                local_adv |= ADVERTISE_1000XPAUSE;
                        if (txflags & ANEG_CFG_PS2)
                                local_adv |= ADVERTISE_1000XPSE_ASYM;

                        if (rxflags & MR_LP_ADV_SYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE;
                        if (rxflags & MR_LP_ADV_ASYM_PAUSE)
                                remote_adv |= LPA_1000XPAUSE_ASYM;

                        tg3_setup_flow_control(tp, local_adv, remote_adv);

                        current_link_up = 1;
                }
                /* Ack sync/config change events until they stop. */
                for (i = 0; i < 30; i++) {
                        udelay(20);
                        tw32_f(MAC_STATUS,
                               (MAC_STATUS_SYNC_CHANGED |
                                MAC_STATUS_CFG_CHANGED));
                        udelay(40);
                        if ((tr32(MAC_STATUS) &
                             (MAC_STATUS_SYNC_CHANGED |
                              MAC_STATUS_CFG_CHANGED)) == 0)
                                break;
                }

                /* Even if autoneg failed, accept the link when PCS is
                 * synced and no config words are being received.
                 */
                mac_status = tr32(MAC_STATUS);
                if (current_link_up == 0 &&
                    (mac_status & MAC_STATUS_PCS_SYNCED) &&
                    !(mac_status & MAC_STATUS_RCVD_CFG))
                        current_link_up = 1;
        } else {
                tg3_setup_flow_control(tp, 0, 0);

                /* Forcing 1000FD link up. */
                current_link_up = 1;

                tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
                udelay(40);

                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);
        }

out:
        return current_link_up;
}
4752
/* Bring up / re-validate the link on a fiber (TBI) port.
 *
 * @tp:          device private state
 * @force_reset: unused on this path; kept so the signature matches the
 *               other tg3_setup_*_phy() variants dispatched from
 *               tg3_setup_phy()
 *
 * Always returns 0.  Link transitions are published through
 * netif_carrier_on()/netif_carrier_off() and tg3_link_report().
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current link parameters so we can detect at the end
	 * whether anything changed and a report is warranted.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: without hardware autoneg, if the device is already
	 * initialized and carrier is up with PCS synced, signal detected
	 * and no config/rcvd-cfg activity pending, just ack the change
	 * bits and leave the link alone.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Force the MAC into TBI port mode before (re)negotiating. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	/* Negotiate either via the on-chip autoneg engine or by hand. */
	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the stale link-change flag in the shared status block
	 * while preserving the rest of the status word.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Repeatedly ack sync/config-change status until the hardware
	 * stops re-asserting it (bounded at 100 iterations).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		/* PCS lost sync: declare link down and, if autoneg has
		 * timed out (serdes_counter == 0), pulse SEND_CONFIGS to
		 * nudge the partner into renegotiating.
		 */
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber is always 1000FD when up; drive the link LED to match. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report a carrier transition, or a parameter change while the
	 * carrier state itself stayed the same.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
4860
/* Link setup for SerDes ports driven through an MII-style register
 * interface (1000BASE-X with MII access) rather than the raw TBI path
 * used by tg3_setup_fiber_phy().
 *
 * @tp:          device private state
 * @force_reset: nonzero to reset the PHY before (re)negotiating
 *
 * Returns the OR-accumulated status of the tg3_readphy() calls made
 * along the way (0 on success).  Link transitions are published via
 * netif_carrier_*() and tg3_link_report().
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Ack any pending MAC status change bits before we start. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR link status is latched-low; read twice so the second read
	 * reflects the current state.
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714 quirk: trust the MAC's TX status for link state
		 * instead of the PHY's BMSR bit.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Rebuild the 1000BASE-X advertisement word from the
		 * configured advertising mask and flow-control settings.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* Only (re)start autoneg if the advertisement changed or
		 * autoneg is not currently enabled; then return early and
		 * let the serdes timeout machinery finish the job.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		/* Forced mode: build the target BMCR value. */
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Withdraw the 1000X advertisement and
				 * restart autoneg so the partner sees the
				 * link drop before we force the new mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Re-sample BMSR (latched-low, so read twice). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of the
			 * local and partner advertisements.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
5032
/* Periodic SerDes parallel-detection poll (called from the driver timer
 * path).  Two jobs:
 *
 *  1. If autoneg is enabled but the link never came up, check the PHY's
 *     signal-detect and config-word status; with signal present and no
 *     config code words arriving, the partner is not autonegotiating, so
 *     force 1000FD up by parallel detection.
 *  2. If a parallel-detected link later starts receiving config code
 *     words, re-enable autoneg so the link is negotiated properly.
 *
 * Does nothing while tp->serdes_counter is still counting down, to give
 * autonegotiation time to complete first.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice; the first read clears latched state. */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5092
/* Top-level PHY/link setup entry point.  Dispatches to the fiber (TBI),
 * fiber-MII, or copper variant based on the PHY flags, then applies the
 * MAC-side settings that depend on the negotiated link: 5784_AX clock
 * prescaler, TX inter-packet gap / slot time, statistics coalescing,
 * and the ASPM L1 power-management threshold workaround.
 *
 * @tp:          device private state
 * @force_reset: passed through to the per-media setup routine
 *
 * Returns the status of the per-media setup routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Scale the GRC prescaler to the current MAC clock rate. */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720 keeps extra fields in MAC_TX_LENGTHS; preserve them. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* 1000HD needs a longer slot time (0xff) than other modes (32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Stats coalescing only runs while carrier is up. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5157
/* Return nonzero while interrupt processing is being synchronized
 * (tp->irq_sync set), meaning interrupt work should be deferred.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5162
5163 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5164 {
5165         int i;
5166
5167         dst = (u32 *)((u8 *)dst + off);
5168         for (i = 0; i < len; i += sizeof(u32))
5169                 *dst++ = tr32(off + i);
5170 }
5171
/* Fill @regs with a register-space snapshot for non-PCIe (legacy)
 * devices by reading each functional block's register window with
 * tg3_rd32_loop().  Blocks that only exist on some chips (MSI-X
 * vectors, TX CPU, NVRAM) are read conditionally based on device flags.
 *
 * @regs must be at least TG3_REG_BLK_SIZE bytes (see tg3_dump_state()).
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* Older parts also have a TX CPU; 5705+ do not expose it here. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5221
/* Dump device state to the kernel log for debugging: a register
 * snapshot (skipping all-zero groups of four) followed by the software
 * and hardware status-block indices of every NAPI context.
 *
 * Uses GFP_ATOMIC because this can be called from non-sleeping
 * (error/timeout) contexts.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line; skip fully-zero lines to keep
	 * the log compact.
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5279
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* If write reordering was already being worked around (flag set
	 * or indirect mailbox writes in use), hitting this path again
	 * means the recovery strategy itself is broken: bail out hard.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the pending recovery under tp->lock; the actual chip
	 * reset happens later from the workqueue.
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5301
5302 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5303 {
5304         /* Tell compiler to fetch tx indices from memory. */
5305         barrier();
5306         return tnapi->tx_pending -
5307                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5308 }
5309
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	/* Hardware consumer index from the status block vs. our own. */
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	/* With TSS, napi[0] carries no TX queue, so queue N maps to
	 * napi[N+1]; compensate here.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	/* Reclaim every completed descriptor between sw_idx and hw_idx. */
	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot with no skb means the hardware and
		 * software views of the ring have diverged (likely MMIO
		 * write reordering); trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear part of the skb. */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		/* Skip over continuation slots of an oversized mapping. */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];
		}

		sw_idx = NEXT_TX(sw_idx);

		/* Unmap each page fragment of the skb. */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* A frag slot that still owns an skb, or running
			 * past hw_idx, indicates ring corruption.
			 */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];
			}

			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to close the race with the xmit
	 * path stopping the queue concurrently.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
5397
5398 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5399 {
5400         if (!ri->skb)
5401                 return;
5402
5403         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5404                          map_sz, PCI_DMA_FROMDEVICE);
5405         dev_kfree_skb_any(ri->skb);
5406         ri->skb = NULL;
5407 }
5408
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Pick the descriptor, ring_info slot, and buffer size for the
	 * requested ring (standard or jumbo).
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, TG3_RX_OFFSET(tp));

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Commit: record the skb and its DMA address, then publish the
	 * address into the descriptor for the chip.
	 */
	map->skb = skb;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
5475
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 *
 * Moves an already-mapped RX buffer from slot @src_idx of napi[0]'s
 * producer ring into slot @dest_idx_unmasked (masked to the ring size)
 * of @dpr, transferring the skb pointer, the unmap address, and the
 * descriptor's DMA address without remapping.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Source is always napi[0]'s producer ring set. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	dest_map->skb = src_map->skb;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->skb = NULL;
}
5525
5526 /* The RX ring scheme is composed of multiple rings which post fresh
5527  * buffers to the chip, and one special ring the chip uses to report
5528  * status back to the host.
5529  *
5530  * The special ring reports the status of received packets to the
5531  * host.  The chip does not write into the original descriptor the
5532  * RX buffer was obtained from.  The chip simply takes the original
5533  * descriptor as provided by the host, updates the status and length
5534  * field, then writes this into the next status ring entry.
5535  *
5536  * Each ring the host uses to post buffers to the chip is described
5537  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
5538  * it is first placed into the on-chip ram.  When the packet's length
5539  * is known, it walks down the TG3_BDINFO entries to select the ring.
5540  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5541  * which is within the range of the new packet's length is chosen.
5542  *
5543  * The "separate ring for rx status" scheme may sound queer, but it makes
5544  * sense from a cache coherency perspective.  If only the host writes
5545  * to the buffer post rings, and only the chip writes to the rx status
5546  * rings, then cache lines never move beyond shared-modified state.
5547  * If both the host and chip were to write into the same ring, cache line
5548  * eviction could occur since both entities want it in an exclusive state.
5549  */
/* Receive packets from this vector's return ring, up to @budget.
 * Large packets are "flipped" (the ring buffer itself becomes the skb
 * and a fresh buffer is allocated); small packets are copied into a
 * freshly allocated skb and the ring buffer is recycled in place.
 * Returns the number of packets handed to the stack.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
        struct tg3 *tp = tnapi->tp;
        u32 work_mask, rx_std_posted = 0;
        u32 std_prod_idx, jmb_prod_idx;
        u32 sw_idx = tnapi->rx_rcb_ptr;
        u16 hw_idx;
        int received;
        struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

        hw_idx = *(tnapi->rx_rcb_prod_idx);
        /*
         * We need to order the read of hw_idx and the read of
         * the opaque cookie.
         */
        rmb();
        work_mask = 0;
        received = 0;
        std_prod_idx = tpr->rx_std_prod_idx;
        jmb_prod_idx = tpr->rx_jmb_prod_idx;
        while (sw_idx != hw_idx && budget > 0) {
                struct ring_info *ri;
                struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
                unsigned int len;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u32 opaque_key, desc_idx, *post_ptr;

                /* The opaque cookie identifies both the source ring and
                 * the slot the buffer came from.
                 */
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
                        dma_addr = dma_unmap_addr(ri, mapping);
                        skb = ri->skb;
                        post_ptr = &std_prod_idx;
                        rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
                        dma_addr = dma_unmap_addr(ri, mapping);
                        skb = ri->skb;
                        post_ptr = &jmb_prod_idx;
                } else
                        goto next_pkt_nopost;

                work_mask |= opaque_key;

                /* Drop frames with hardware-reported errors (except the
                 * benign odd-nibble MII indication) and recycle the buffer.
                 */
                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
                drop_it:
                        tg3_recycle_rx(tnapi, tpr, opaque_key,
                                       desc_idx, *post_ptr);
                drop_it_no_recycle:
                        /* Other statistics kept track of by card. */
                        tp->rx_dropped++;
                        goto next_pkt;
                }

                /* Hardware length includes the FCS; strip it. */
                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
                      ETH_FCS_LEN;

                if (len > TG3_RX_COPY_THRESH(tp)) {
                        int skb_size;

                        /* Flip path: post a replacement buffer, then hand
                         * the current buffer to the stack as-is.
                         */
                        skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
                                                    *post_ptr);
                        if (skb_size < 0)
                                goto drop_it;

                        pci_unmap_single(tp->pdev, dma_addr, skb_size,
                                         PCI_DMA_FROMDEVICE);

                        /* Ensure that the update to the skb happens
                         * after the usage of the old DMA mapping.
                         */
                        smp_wmb();

                        ri->skb = NULL;

                        skb_put(skb, len);
                } else {
                        struct sk_buff *copy_skb;

                        /* Copy path: small packet, recycle the ring buffer
                         * and deliver a freshly allocated copy instead.
                         */
                        tg3_recycle_rx(tnapi, tpr, opaque_key,
                                       desc_idx, *post_ptr);

                        copy_skb = netdev_alloc_skb(tp->dev, len +
                                                    TG3_RAW_IP_ALIGN);
                        if (copy_skb == NULL)
                                goto drop_it_no_recycle;

                        skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
                        skb_put(copy_skb, len);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                        skb_copy_from_linear_data(skb, copy_skb->data, len);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

                        /* We'll reuse the original ring buffer. */
                        skb = copy_skb;
                }

                /* Trust the hardware checksum only when RXCSUM is enabled
                 * and the chip verified a TCP/UDP checksum of 0xffff.
                 */
                if ((tp->dev->features & NETIF_F_RXCSUM) &&
                    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                      >> RXD_TCPCSUM_SHIFT) == 0xffff))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);

                skb->protocol = eth_type_trans(skb, tp->dev);

                /* Drop oversized frames unless they are VLAN tagged (the
                 * tag accounts for the extra length).
                 */
                if (len > (tp->dev->mtu + ETH_HLEN) &&
                    skb->protocol != htons(ETH_P_8021Q)) {
                        dev_kfree_skb(skb);
                        goto drop_it_no_recycle;
                }

                if (desc->type_flags & RXD_FLAG_VLAN &&
                    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
                        __vlan_hwaccel_put_tag(skb,
                                               desc->err_vlan & RXD_VLAN_MASK);

                napi_gro_receive(&tnapi->napi, skb);

                received++;
                budget--;

next_pkt:
                (*post_ptr)++;

                /* Periodically publish the std producer index so the chip
                 * never starves for buffers on long bursts.
                 */
                if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
                        tpr->rx_std_prod_idx = std_prod_idx &
                                               tp->rx_std_ring_mask;
                        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
                                     tpr->rx_std_prod_idx);
                        work_mask &= ~RXD_OPAQUE_RING_STD;
                        rx_std_posted = 0;
                }
next_pkt_nopost:
                sw_idx++;
                sw_idx &= tp->rx_ret_ring_mask;

                /* Refresh hw_idx to see if there is new work */
                if (sw_idx == hw_idx) {
                        hw_idx = *(tnapi->rx_rcb_prod_idx);
                        rmb();
                }
        }

        /* ACK the status ring. */
        tnapi->rx_rcb_ptr = sw_idx;
        tw32_rx_mbox(tnapi->consmbox, sw_idx);

        /* Refill RX ring(s). */
        if (!tg3_flag(tp, ENABLE_RSS)) {
                /* Non-RSS: this vector owns the hardware rings; publish the
                 * new producer indices directly via the mailboxes.
                 */
                if (work_mask & RXD_OPAQUE_RING_STD) {
                        tpr->rx_std_prod_idx = std_prod_idx &
                                               tp->rx_std_ring_mask;
                        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
                                     tpr->rx_std_prod_idx);
                }
                if (work_mask & RXD_OPAQUE_RING_JUMBO) {
                        tpr->rx_jmb_prod_idx = jmb_prod_idx &
                                               tp->rx_jmb_ring_mask;
                        tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
                                     tpr->rx_jmb_prod_idx);
                }
                mmiowb();
        } else if (work_mask) {
                /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
                 * updated before the producer indices can be updated.
                 */
                smp_wmb();

                tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
                tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

                /* RSS: vector 1 funnels buffers back to the master ring
                 * (see tg3_poll_work); wake it if we are another vector.
                 */
                if (tnapi != &tp->napi[1])
                        napi_schedule(&tp->napi[1].napi);
        }

        return received;
}
5732
/* Service link-change events reported through the status block.  Only
 * runs when the chip signals link changes via the status word (i.e.
 * neither the link-change register nor serdes polling is in use).
 */
static void tg3_poll_link(struct tg3 *tp)
{
        /* handle link change and other phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                struct tg3_hw_status *sblk = tp->napi[0].hw_status;

                if (sblk->status & SD_STATUS_LINK_CHG) {
                        /* Ack the link-change bit while keeping the block
                         * marked as updated.
                         */
                        sblk->status = SD_STATUS_UPDATED |
                                       (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
                        if (tg3_flag(tp, USE_PHYLIB)) {
                                /* phylib owns PHY handling; just clear the
                                 * MAC attention bits.
                                 */
                                tw32_f(MAC_STATUS,
                                     (MAC_STATUS_SYNC_CHANGED |
                                      MAC_STATUS_CFG_CHANGED |
                                      MAC_STATUS_MI_COMPLETION |
                                      MAC_STATUS_LNKSTATE_CHANGED));
                                udelay(40);
                        } else
                                tg3_setup_phy(tp, 0);
                        spin_unlock(&tp->lock);
                }
        }
}
5756
/* Transfer recycled RX buffers from a per-vector producer ring set
 * (@spr) back to the master ring set (@dpr) owned by vector 0.  Used
 * in RSS mode.  Returns 0 on a complete transfer, or -ENOSPC when a
 * destination slot was still occupied (transfer is then partial).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
                                struct tg3_rx_prodring_set *dpr,
                                struct tg3_rx_prodring_set *spr)
{
        u32 si, di, cpycnt, src_prod_idx;
        int i, err = 0;

        /* First pass: the standard ring. */
        while (1) {
                src_prod_idx = spr->rx_std_prod_idx;

                /* Make sure updates to the rx_std_buffers[] entries and the
                 * standard producer index are seen in the correct order.
                 */
                smp_rmb();

                if (spr->rx_std_cons_idx == src_prod_idx)
                        break;

                /* Entries available in one contiguous run (wrap-aware). */
                if (spr->rx_std_cons_idx < src_prod_idx)
                        cpycnt = src_prod_idx - spr->rx_std_cons_idx;
                else
                        cpycnt = tp->rx_std_ring_mask + 1 -
                                 spr->rx_std_cons_idx;

                /* Also limit by the contiguous space in the destination. */
                cpycnt = min(cpycnt,
                             tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

                si = spr->rx_std_cons_idx;
                di = dpr->rx_std_prod_idx;

                /* Stop at the first destination slot still holding an skb. */
                for (i = di; i < di + cpycnt; i++) {
                        if (dpr->rx_std_buffers[i].skb) {
                                cpycnt = i - di;
                                err = -ENOSPC;
                                break;
                        }
                }

                if (!cpycnt)
                        break;

                /* Ensure that updates to the rx_std_buffers ring and the
                 * shadowed hardware producer ring from tg3_recycle_skb() are
                 * ordered correctly WRT the skb check above.
                 */
                smp_rmb();

                /* Move the ring_info entries (skb + DMA cookie) wholesale,
                 * then mirror the BD addresses one by one.
                 */
                memcpy(&dpr->rx_std_buffers[di],
                       &spr->rx_std_buffers[si],
                       cpycnt * sizeof(struct ring_info));

                for (i = 0; i < cpycnt; i++, di++, si++) {
                        struct tg3_rx_buffer_desc *sbd, *dbd;
                        sbd = &spr->rx_std[si];
                        dbd = &dpr->rx_std[di];
                        dbd->addr_hi = sbd->addr_hi;
                        dbd->addr_lo = sbd->addr_lo;
                }

                spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
                                       tp->rx_std_ring_mask;
                dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
                                       tp->rx_std_ring_mask;
        }

        /* Second pass: same algorithm for the jumbo ring. */
        while (1) {
                src_prod_idx = spr->rx_jmb_prod_idx;

                /* Make sure updates to the rx_jmb_buffers[] entries and
                 * the jumbo producer index are seen in the correct order.
                 */
                smp_rmb();

                if (spr->rx_jmb_cons_idx == src_prod_idx)
                        break;

                if (spr->rx_jmb_cons_idx < src_prod_idx)
                        cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
                else
                        cpycnt = tp->rx_jmb_ring_mask + 1 -
                                 spr->rx_jmb_cons_idx;

                cpycnt = min(cpycnt,
                             tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

                si = spr->rx_jmb_cons_idx;
                di = dpr->rx_jmb_prod_idx;

                for (i = di; i < di + cpycnt; i++) {
                        if (dpr->rx_jmb_buffers[i].skb) {
                                cpycnt = i - di;
                                err = -ENOSPC;
                                break;
                        }
                }

                if (!cpycnt)
                        break;

                /* Ensure that updates to the rx_jmb_buffers ring and the
                 * shadowed hardware producer ring from tg3_recycle_skb() are
                 * ordered correctly WRT the skb check above.
                 */
                smp_rmb();

                memcpy(&dpr->rx_jmb_buffers[di],
                       &spr->rx_jmb_buffers[si],
                       cpycnt * sizeof(struct ring_info));

                for (i = 0; i < cpycnt; i++, di++, si++) {
                        struct tg3_rx_buffer_desc *sbd, *dbd;
                        sbd = &spr->rx_jmb[si].std;
                        dbd = &dpr->rx_jmb[di].std;
                        dbd->addr_hi = sbd->addr_hi;
                        dbd->addr_lo = sbd->addr_lo;
                }

                spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
                                       tp->rx_jmb_ring_mask;
                dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
                                       tp->rx_jmb_ring_mask;
        }

        return err;
}
5882
/* Core per-vector poll work shared by tg3_poll() and tg3_poll_msix():
 * reap TX completions, receive packets within the remaining NAPI
 * budget, and (RSS mode, vector 1 only) funnel recycled producer-ring
 * buffers from all vectors back to the master rings.  Returns the
 * updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
        struct tg3 *tp = tnapi->tp;

        /* run TX completion thread */
        if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
                tg3_tx(tnapi);
                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        return work_done;
        }

        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with tg3->napi.poll()
         */
        if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_done += tg3_rx(tnapi, budget - work_done);

        if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
                struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
                int i, err = 0;
                u32 std_prod_idx = dpr->rx_std_prod_idx;
                u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

                /* Gather recycled buffers from every RX vector back into
                 * the master (vector 0) producer rings.
                 */
                for (i = 1; i < tp->irq_cnt; i++)
                        err |= tg3_rx_prodring_xfer(tp, dpr,
                                                    &tp->napi[i].prodring);

                /* Buffer/ring updates must be visible to the device before
                 * the producer mailboxes are written.
                 */
                wmb();

                if (std_prod_idx != dpr->rx_std_prod_idx)
                        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
                                     dpr->rx_std_prod_idx);

                if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
                        tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
                                     dpr->rx_jmb_prod_idx);

                mmiowb();

                /* A transfer came up short (-ENOSPC): poke the coalescing
                 * engine so the transfer is retried on a later poll.
                 * NOTE(review): exact HOSTCC_MODE/coal_now semantics come
                 * from the chip spec - confirm against tg3.h.
                 */
                if (err)
                        tw32_f(HOSTCC_MODE, tp->coal_now);
        }

        return work_done;
}
5929
/* NAPI poll routine for MSI-X vectors (tagged status mode).  Unlike
 * tg3_poll(), link events and chip errors are not handled here - only
 * this vector's TX/RX rings are serviced.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
        struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
        struct tg3 *tp = tnapi->tp;
        int work_done = 0;
        struct tg3_hw_status *sblk = tnapi->hw_status;

        while (1) {
                work_done = tg3_poll_work(tnapi, work_done, budget);

                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        goto tx_recovery;

                if (unlikely(work_done >= budget))
                        break;

                /* tp->last_tag is used in tg3_int_reenable() below
                 * to tell the hw how much work has been processed,
                 * so we must read it before checking for more work.
                 */
                tnapi->last_tag = sblk->status_tag;
                tnapi->last_irq_tag = tnapi->last_tag;
                rmb();

                /* check for RX/TX work to do */
                if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
                           *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
                        napi_complete(napi);
                        /* Reenable interrupts by writing the last seen tag
                         * back to the interrupt mailbox.
                         */
                        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
                        mmiowb();
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        napi_complete(napi);
        schedule_work(&tp->reset_task);
        return work_done;
}
5973
5974 static void tg3_process_error(struct tg3 *tp)
5975 {
5976         u32 val;
5977         bool real_error = false;
5978
5979         if (tg3_flag(tp, ERROR_PROCESSED))
5980                 return;
5981
5982         /* Check Flow Attention register */
5983         val = tr32(HOSTCC_FLOW_ATTN);
5984         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5985                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5986                 real_error = true;
5987         }
5988
5989         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5990                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5991                 real_error = true;
5992         }
5993
5994         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5995                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5996                 real_error = true;
5997         }
5998
5999         if (!real_error)
6000                 return;
6001
6002         tg3_dump_state(tp);
6003
6004         tg3_flag_set(tp, ERROR_PROCESSED);
6005         schedule_work(&tp->reset_task);
6006 }
6007
/* NAPI poll routine for vector 0: handles chip error attention, link
 * changes and the default TX/RX rings.  Loops until the budget is
 * exhausted or no work remains, then re-enables interrupts.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
        struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
        struct tg3 *tp = tnapi->tp;
        int work_done = 0;
        struct tg3_hw_status *sblk = tnapi->hw_status;

        while (1) {
                if (sblk->status & SD_STATUS_ERROR)
                        tg3_process_error(tp);

                tg3_poll_link(tp);

                work_done = tg3_poll_work(tnapi, work_done, budget);

                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        goto tx_recovery;

                if (unlikely(work_done >= budget))
                        break;

                if (tg3_flag(tp, TAGGED_STATUS)) {
                        /* tp->last_tag is used in tg3_int_reenable() below
                         * to tell the hw how much work has been processed,
                         * so we must read it before checking for more work.
                         */
                        tnapi->last_tag = sblk->status_tag;
                        tnapi->last_irq_tag = tnapi->last_tag;
                        rmb();
                } else
                        /* Non-tagged mode: ack the status block directly. */
                        sblk->status &= ~SD_STATUS_UPDATED;

                if (likely(!tg3_has_work(tnapi))) {
                        napi_complete(napi);
                        tg3_int_reenable(tnapi);
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        napi_complete(napi);
        schedule_work(&tp->reset_task);
        return work_done;
}
6055
6056 static void tg3_napi_disable(struct tg3 *tp)
6057 {
6058         int i;
6059
6060         for (i = tp->irq_cnt - 1; i >= 0; i--)
6061                 napi_disable(&tp->napi[i].napi);
6062 }
6063
6064 static void tg3_napi_enable(struct tg3 *tp)
6065 {
6066         int i;
6067
6068         for (i = 0; i < tp->irq_cnt; i++)
6069                 napi_enable(&tp->napi[i].napi);
6070 }
6071
6072 static void tg3_napi_init(struct tg3 *tp)
6073 {
6074         int i;
6075
6076         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6077         for (i = 1; i < tp->irq_cnt; i++)
6078                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6079 }
6080
6081 static void tg3_napi_fini(struct tg3 *tp)
6082 {
6083         int i;
6084
6085         for (i = 0; i < tp->irq_cnt; i++)
6086                 netif_napi_del(&tp->napi[i].napi);
6087 }
6088
/* Quiesce the data path: stop NAPI polling, then disable all TX
 * queues.  The trans_start touch must come first so the watchdog does
 * not see a stale timestamp while TX is stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        tg3_napi_disable(tp);
        netif_tx_disable(tp->dev);
}
6095
/* Restart the data path after tg3_netif_stop(): wake the TX queues,
 * re-enable NAPI, mark the status block as updated so pending work is
 * noticed, and unmask interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        /* NOTE: unconditional netif_tx_wake_all_queues is only
         * appropriate so long as all callers are assured to
         * have free tx slots (such as after tg3_init_hw)
         */
        netif_tx_wake_all_queues(tp->dev);

        tg3_napi_enable(tp);
        /* Force SD_STATUS_UPDATED so work queued while stopped is seen. */
        tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
6108
/* Flag the driver as irq-synchronized and wait for every in-flight
 * interrupt handler to finish.  Handlers test irq_sync (tg3_irq_sync)
 * and refuse to schedule NAPI while it is set.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
        int i;

        BUG_ON(tp->irq_sync);

        tp->irq_sync = 1;
        /* Make irq_sync visible before waiting out running handlers. */
        smp_mb();

        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);
}
6121
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        spin_lock_bh(&tp->lock);
        if (irq_sync)
                tg3_irq_quiesce(tp);
}
6133
/* Counterpart to tg3_full_lock(): release the device lock. */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock_bh(&tp->lock);
}
6138
6139 /* One-shot MSI handler - Chip automatically disables interrupt
6140  * after sending MSI so driver doesn't have to do it.
6141  */
6142 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6143 {
6144         struct tg3_napi *tnapi = dev_id;
6145         struct tg3 *tp = tnapi->tp;
6146
6147         prefetch(tnapi->hw_status);
6148         if (tnapi->rx_rcb)
6149                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6150
6151         if (likely(!tg3_irq_sync(tp)))
6152                 napi_schedule(&tnapi->napi);
6153
6154         return IRQ_HANDLED;
6155 }
6156
6157 /* MSI ISR - No need to check for interrupt sharing and no need to
6158  * flush status block and interrupt mailbox. PCI ordering rules
6159  * guarantee that MSI will arrive after the status block.
6160  */
6161 static irqreturn_t tg3_msi(int irq, void *dev_id)
6162 {
6163         struct tg3_napi *tnapi = dev_id;
6164         struct tg3 *tp = tnapi->tp;
6165
6166         prefetch(tnapi->hw_status);
6167         if (tnapi->rx_rcb)
6168                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6169         /*
6170          * Writing any value to intr-mbox-0 clears PCI INTA# and
6171          * chip-internal interrupt pending events.
6172          * Writing non-zero to intr-mbox-0 additional tells the
6173          * NIC to stop sending us irqs, engaging "in-intr-handler"
6174          * event coalescing.
6175          */
6176         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6177         if (likely(!tg3_irq_sync(tp)))
6178                 napi_schedule(&tnapi->napi);
6179
6180         return IRQ_RETVAL(1);
6181 }
6182
/* Legacy INTx interrupt handler for chips that do not use tagged
 * status blocks.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        /* Not our interrupt, or the chip is resetting. */
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tnapi))) {
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
                napi_schedule(&tnapi->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               0x00000000);
        }
out:
        return IRQ_RETVAL(handled);
}
6231
/* INTx interrupt handler for chips using tagged status blocks.  A
 * repeated status tag means no new status block has been posted since
 * the last interrupt we claimed.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

        /*
         * In a shared interrupt configuration, sometimes other devices'
         * interrupts will scream.  We record the current status tag here
         * so that the above check can report that the screaming interrupts
         * are unhandled.  Eventually they will be silenced.
         */
        tnapi->last_irq_tag = sblk->status_tag;

        if (tg3_irq_sync(tp))
                goto out;

        prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        napi_schedule(&tnapi->napi);

out:
        return IRQ_RETVAL(handled);
}
6283
6284 /* ISR for interrupt test */
6285 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6286 {
6287         struct tg3_napi *tnapi = dev_id;
6288         struct tg3 *tp = tnapi->tp;
6289         struct tg3_hw_status *sblk = tnapi->hw_status;
6290
6291         if ((sblk->status & SD_STATUS_UPDATED) ||
6292             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6293                 tg3_disable_ints(tp);
6294                 return IRQ_RETVAL(1);
6295         }
6296         return IRQ_RETVAL(0);
6297 }
6298
6299 static int tg3_init_hw(struct tg3 *, int);
6300 static int tg3_halt(struct tg3 *, int, int);
6301
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.  On init failure the device is halted
 * and closed; returns the tg3_init_hw() error code (0 on success).
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
        __releases(tp->lock)
        __acquires(tp->lock)
{
        int err;

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                netdev_err(tp->dev,
                           "Failed to re-initialize device, aborting\n");
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                /* Unwind: drop the lock, stop the timer, re-enable NAPI
                 * and close the device, then reacquire the lock so the
                 * caller's unlock still balances.
                 */
                tg3_full_unlock(tp);
                del_timer_sync(&tp->timer);
                tp->irq_sync = 0;
                tg3_napi_enable(tp);
                dev_close(tp->dev);
                tg3_full_lock(tp, 0);
        }
        return err;
}
6325
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: service every interrupt vector by hand, since normal
 * interrupt delivery may be unavailable in this context.
 */
static void tg3_poll_controller(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int vec;

        for (vec = 0; vec < tp->irq_cnt; vec++) {
                struct tg3_napi *tnapi = &tp->napi[vec];

                tg3_interrupt(tnapi->irq_vec, tnapi);
        }
}
#endif
6336
/* Workqueue handler performing a full chip reset after a fatal error or
 * TX timeout.  Runs in process context via tp->reset_task (scheduled
 * from tg3_tx_timeout() and error paths).
 */
static void tg3_reset_task(struct work_struct *work)
{
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        int err;
        unsigned int restart_timer;

        tg3_full_lock(tp, 0);

        if (!netif_running(tp->dev)) {
                /* Device was closed before this work ran; nothing to do. */
                tg3_full_unlock(tp);
                return;
        }

        tg3_full_unlock(tp);

        /* Stop the PHY and the net interface before taking the lock for
         * the reset proper.
         */
        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        restart_timer = tg3_flag(tp, RESTART_TIMER);
        tg3_flag_clear(tp, RESTART_TIMER);

        if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
                /* A TX recovery is pending; switch to flushed mailbox
                 * writes before bringing the chip back up, in case write
                 * reordering on the mailbox registers caused the hang.
                 */
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
                tg3_flag_set(tp, MBOX_WRITE_REORDER);
                tg3_flag_clear(tp, TX_RECOVERY_PENDING);
        }

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        err = tg3_init_hw(tp, 1);
        if (err)
                goto out;

        tg3_netif_start(tp);

        if (restart_timer)
                mod_timer(&tp->timer, jiffies + 1);

out:
        tg3_full_unlock(tp);

        /* tg3_phy_start() must run without tp->lock held. */
        if (!err)
                tg3_phy_start(tp);
}
6384
6385 static void tg3_tx_timeout(struct net_device *dev)
6386 {
6387         struct tg3 *tp = netdev_priv(dev);
6388
6389         if (netif_msg_tx_err(tp)) {
6390                 netdev_err(dev, "transmit timed out, resetting\n");
6391                 tg3_dump_state(tp);
6392         }
6393
6394         schedule_work(&tp->reset_task);
6395 }
6396
6397 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6398 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6399 {
6400         u32 base = (u32) mapping & 0xffffffff;
6401
6402         return (base > 0xffffdcc0) && (base + len + 8 < base);
6403 }
6404
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
                                          int len)
{
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
        /* Only 64-bit platforms with highmem can produce DMA addresses
         * above 40 bits, and only chips with the 40-bit DMA bug care.
         */
        if (tg3_flag(tp, 40BIT_DMA_BUG))
                return ((u64) mapping + len) > DMA_BIT_MASK(40);
        return 0;
#else
        return 0;
#endif
}
6417
6418 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6419                                  dma_addr_t mapping, u32 len, u32 flags,
6420                                  u32 mss, u32 vlan)
6421 {
6422         txbd->addr_hi = ((u64) mapping >> 32);
6423         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6424         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6425         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6426 }
6427
6428 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6429                             dma_addr_t map, u32 len, u32 flags,
6430                             u32 mss, u32 vlan)
6431 {
6432         struct tg3 *tp = tnapi->tp;
6433         bool hwbug = false;
6434
6435         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6436                 hwbug = 1;
6437
6438         if (tg3_4g_overflow_test(map, len))
6439                 hwbug = 1;
6440
6441         if (tg3_40bit_overflow_test(tp, map, len))
6442                 hwbug = 1;
6443
6444         if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6445                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6446                 while (len > TG3_TX_BD_DMA_MAX) {
6447                         u32 frag_len = TG3_TX_BD_DMA_MAX;
6448                         len -= TG3_TX_BD_DMA_MAX;
6449
6450                         if (len) {
6451                                 tnapi->tx_buffers[*entry].fragmented = true;
6452                                 /* Avoid the 8byte DMA problem */
6453                                 if (len <= 8) {
6454                                         len += TG3_TX_BD_DMA_MAX / 2;
6455                                         frag_len = TG3_TX_BD_DMA_MAX / 2;
6456                                 }
6457                         } else
6458                                 tmp_flag = flags;
6459
6460                         if (*budget) {
6461                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6462                                               frag_len, tmp_flag, mss, vlan);
6463                                 (*budget)--;
6464                                 *entry = NEXT_TX(*entry);
6465                         } else {
6466                                 hwbug = 1;
6467                                 break;
6468                         }
6469
6470                         map += frag_len;
6471                 }
6472
6473                 if (len) {
6474                         if (*budget) {
6475                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6476                                               len, flags, mss, vlan);
6477                                 (*budget)--;
6478                                 *entry = NEXT_TX(*entry);
6479                         } else {
6480                                 hwbug = 1;
6481                         }
6482                 }
6483         } else {
6484                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6485                               len, flags, mss, vlan);
6486                 *entry = NEXT_TX(*entry);
6487         }
6488
6489         return hwbug;
6490 }
6491
6492 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6493 {
6494         int i;
6495         struct sk_buff *skb;
6496         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6497
6498         skb = txb->skb;
6499         txb->skb = NULL;
6500
6501         pci_unmap_single(tnapi->tp->pdev,
6502                          dma_unmap_addr(txb, mapping),
6503                          skb_headlen(skb),
6504                          PCI_DMA_TODEVICE);
6505
6506         while (txb->fragmented) {
6507                 txb->fragmented = false;
6508                 entry = NEXT_TX(entry);
6509                 txb = &tnapi->tx_buffers[entry];
6510         }
6511
6512         for (i = 0; i < last; i++) {
6513                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6514
6515                 entry = NEXT_TX(entry);
6516                 txb = &tnapi->tx_buffers[entry];
6517
6518                 pci_unmap_page(tnapi->tp->pdev,
6519                                dma_unmap_addr(txb, mapping),
6520                                frag->size, PCI_DMA_TODEVICE);
6521
6522                 while (txb->fragmented) {
6523                         txb->fragmented = false;
6524                         entry = NEXT_TX(entry);
6525                         txb = &tnapi->tx_buffers[entry];
6526                 }
6527         }
6528 }
6529
/* Workaround 4GB and 40-bit hardware DMA bugs.
 *
 * Copies @skb into a freshly allocated linear skb (with extra headroom
 * for 4-byte alignment on the 5701), maps it, and posts it at ring
 * position *@entry.  The original @skb is always consumed.  Returns 0
 * on success, -1 if allocation, mapping or descriptor setup failed (in
 * which case the packet is dropped).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
                                       struct sk_buff *skb,
                                       u32 *entry, u32 *budget,
                                       u32 base_flags, u32 mss, u32 vlan)
{
        struct tg3 *tp = tnapi->tp;
        struct sk_buff *new_skb;
        dma_addr_t new_addr = 0;
        int ret = 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                new_skb = skb_copy(skb, GFP_ATOMIC);
        else {
                /* 5701 wants the data 4-byte aligned; expand headroom
                 * by however much is needed to reach alignment.
                 */
                int more_headroom = 4 - ((unsigned long)skb->data & 3);

                new_skb = skb_copy_expand(skb,
                                          skb_headroom(skb) + more_headroom,
                                          skb_tailroom(skb), GFP_ATOMIC);
        }

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure the mapping succeeded */
                if (pci_dma_mapping_error(tp->pdev, new_addr)) {
                        dev_kfree_skb(new_skb);
                        ret = -1;
                } else {
                        base_flags |= TXD_FLAG_END;

                        tnapi->tx_buffers[*entry].skb = new_skb;
                        dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
                                           mapping, new_addr);

                        if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
                                            new_skb->len, base_flags,
                                            mss, vlan)) {
                                /* Even the linear copy hit a hw bug or
                                 * ran out of descriptors; undo and drop.
                                 */
                                tg3_tx_skb_unmap(tnapi, *entry, 0);
                                dev_kfree_skb(new_skb);
                                ret = -1;
                        }
                }
        }

        /* The caller's skb is consumed on every path. */
        dev_kfree_skb(skb);

        return ret;
}
6582
/* Forward declaration: tg3_tso_bug() below re-enters the xmit path. */
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);

/* Use GSO to workaround a rare TSO bug that may be triggered when the
 * TSO header is greater than 80 bytes.
 *
 * Segments the skb in software and pushes each resulting packet through
 * tg3_start_xmit() individually.  The original skb is always consumed
 * unless NETDEV_TX_BUSY is returned.
 */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;
        u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
                netif_stop_queue(tp->dev);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
                        return NETDEV_TX_BUSY;

                netif_wake_queue(tp->dev);
        }

        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs))
                goto tg3_tso_bug_end;

        /* Transmit each software-built segment on its own. */
        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
6625
/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
 *
 * Maps the skb head and page fragments, builds TX descriptors (falling
 * back to tigon3_dma_hwbug_workaround() if any descriptor would trip a
 * hardware DMA bug), then updates the TX producer mailbox.
 */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss, vlan = 0;
        u32 budget;
        int i = -1, would_hit_hwbug;
        dma_addr_t mapping;
        struct tg3_napi *tnapi;
        struct netdev_queue *txq;
        unsigned int last;

        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
        /* NOTE(review): with TSS the tx rings appear offset by one napi
         * vector — confirm against the vector setup code.
         */
        if (tg3_flag(tp, ENABLE_TSS))
                tnapi++;

        budget = tg3_tx_avail(tnapi);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_tx_queue_stopped(txq)) {
                        netif_tx_stop_queue(txq);

                        /* This is a hard error, log it. */
                        netdev_err(dev,
                                   "BUG! Tx Ring full when queue awake!\n");
                }
                return NETDEV_TX_BUSY;
        }

        entry = tnapi->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;

        mss = skb_shinfo(skb)->gso_size;
        if (mss) {
                struct iphdr *iph;
                u32 tcp_opt_len, hdr_len;

                /* TSO mutates the headers below; get a private copy if
                 * the header area is shared.
                 */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                iph = ip_hdr(skb);
                tcp_opt_len = tcp_optlen(skb);

                if (skb_is_gso_v6(skb)) {
                        hdr_len = skb_headlen(skb) - ETH_HLEN;
                } else {
                        u32 ip_tcp_len;

                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
                        hdr_len = ip_tcp_len + tcp_opt_len;

                        iph->check = 0;
                        iph->tot_len = htons(mss + hdr_len);
                }

                /* Large TSO headers trip a chip bug on some parts; do
                 * the segmentation in software instead.
                 */
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                    tg3_flag(tp, TSO_BUG))
                        return tg3_tso_bug(tp, skb);

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* Encode the header length into mss/base_flags in the
                 * format each hardware-TSO generation expects.
                 */
                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                        if (hdr_len & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }

        if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
            !mss && skb->len > VLAN_ETH_FRAME_LEN)
                base_flags |= TXD_FLAG_JMB_PKT;

        if (vlan_tx_tag_present(skb)) {
                base_flags |= TXD_FLAG_VLAN;
                vlan = vlan_tx_tag_get(skb);
        }

        len = skb_headlen(skb);

        /* Map the linear head of the skb. */
        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, mapping)) {
                dev_kfree_skb(skb);
                goto out_unlock;
        }

        tnapi->tx_buffers[entry].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

        would_hit_hwbug = 0;

        if (tg3_flag(tp, 5701_DMA_BUG))
                would_hit_hwbug = 1;

        if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
                          ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
                            mss, vlan))
                would_hit_hwbug = 1;

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                u32 tmp_mss = mss;

                /* Without hardware TSO, mss goes only in the first
                 * descriptor of the packet.
                 */
                if (!tg3_flag(tp, HW_TSO_1) &&
                    !tg3_flag(tp, HW_TSO_2) &&
                    !tg3_flag(tp, HW_TSO_3))
                        tmp_mss = 0;

                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
                                                   len, DMA_TO_DEVICE);

                        /* Only the head entry carries the skb pointer;
                         * fragment entries carry just the mapping.
                         */
                        tnapi->tx_buffers[entry].skb = NULL;
                        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
                                           mapping);
                        if (dma_mapping_error(&tp->pdev->dev, mapping))
                                goto dma_error;

                        if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
                                            len, base_flags |
                                            ((i == last) ? TXD_FLAG_END : 0),
                                            tmp_mss, vlan))
                                would_hit_hwbug = 1;
                }
        }

        if (would_hit_hwbug) {
                tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                entry = tnapi->tx_prod;
                budget = tg3_tx_avail(tnapi);
                if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
                                                base_flags, mss, vlan))
                        goto out_unlock;
        }

        skb_tx_timestamp(skb);

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox(tnapi->prodmbox, entry);

        tnapi->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
                        netif_tx_wake_queue(txq);
        }

out_unlock:
        mmiowb();

        return NETDEV_TX_OK;

dma_error:
        /* Unwind the head mapping and the i fragments mapped so far,
         * then drop the packet.
         */
        tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
        dev_kfree_skb(skb);
        tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
        return NETDEV_TX_OK;
}
6841
6842 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6843 {
6844         if (enable) {
6845                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6846                                   MAC_MODE_PORT_MODE_MASK);
6847
6848                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6849
6850                 if (!tg3_flag(tp, 5705_PLUS))
6851                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6852
6853                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6854                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6855                 else
6856                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6857         } else {
6858                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6859
6860                 if (tg3_flag(tp, 5705_PLUS) ||
6861                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6862                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6863                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6864         }
6865
6866         tw32(MAC_MODE, tp->mac_mode);
6867         udelay(40);
6868 }
6869
/* Put the PHY into loopback at @speed.  With @extlpbk, set up external
 * loopback instead of the internal BMCR loopback bit.  Returns 0 on
 * success or -EIO if external loopback configuration fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
        u32 val, bmcr, mac_mode, ptest = 0;

        tg3_phy_toggle_apd(tp, false);
        tg3_phy_toggle_automdix(tp, 0);

        if (extlpbk && tg3_phy_set_extloopbk(tp))
                return -EIO;

        bmcr = BMCR_FULLDPLX;
        switch (speed) {
        case SPEED_10:
                break;
        case SPEED_100:
                bmcr |= BMCR_SPEED100;
                break;
        case SPEED_1000:
        default:
                if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        /* FET PHYs can't do gigabit; fall back to 100. */
                        speed = SPEED_100;
                        bmcr |= BMCR_SPEED100;
                } else {
                        speed = SPEED_1000;
                        bmcr |= BMCR_SPEED1000;
                }
        }

        if (extlpbk) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                        tg3_readphy(tp, MII_CTRL1000, &val);
                        val |= CTL1000_AS_MASTER |
                               CTL1000_ENABLE_MASTER;
                        tg3_writephy(tp, MII_CTRL1000, val);
                } else {
                        ptest = MII_TG3_FET_PTEST_TRIM_SEL |
                                MII_TG3_FET_PTEST_TRIM_2;
                        tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
                }
        } else
                bmcr |= BMCR_LOOPBACK;

        tg3_writephy(tp, MII_BMCR, bmcr);

        /* The write needs to be flushed for the FETs */
        if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tg3_readphy(tp, MII_BMCR, &bmcr);

        udelay(40);

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
                             MII_TG3_FET_PTEST_FRC_TX_LINK |
                             MII_TG3_FET_PTEST_FRC_TX_LOCK);

                /* The write needs to be flushed for the AC131 */
                tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
        }

        /* Reset to prevent losing 1st rx packet intermittently */
        if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
            tg3_flag(tp, 5780_CLASS)) {
                tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                udelay(10);
                tw32_f(MAC_RX_MODE, tp->rx_mode);
        }

        mac_mode = tp->mac_mode &
                   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        if (speed == SPEED_1000)
                mac_mode |= MAC_MODE_PORT_MODE_GMII;
        else
                mac_mode |= MAC_MODE_PORT_MODE_MII;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

                /* Link polarity quirks for specific 5700-era PHYs. */
                if (masked_phy_id == TG3_PHY_ID_BCM5401)
                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                else if (masked_phy_id == TG3_PHY_ID_BCM5411)
                        mac_mode |= MAC_MODE_LINK_POLARITY;

                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_LNK3_LED_MODE);
        }

        tw32(MAC_MODE, mac_mode);
        udelay(40);

        return 0;
}
6962
6963 static void tg3_set_loopback(struct net_device *dev, u32 features)
6964 {
6965         struct tg3 *tp = netdev_priv(dev);
6966
6967         if (features & NETIF_F_LOOPBACK) {
6968                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
6969                         return;
6970
6971                 spin_lock_bh(&tp->lock);
6972                 tg3_mac_loopback(tp, true);
6973                 netif_carrier_on(tp->dev);
6974                 spin_unlock_bh(&tp->lock);
6975                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
6976         } else {
6977                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
6978                         return;
6979
6980                 spin_lock_bh(&tp->lock);
6981                 tg3_mac_loopback(tp, false);
6982                 /* Force link status check */
6983                 tg3_setup_phy(tp, 1);
6984                 spin_unlock_bh(&tp->lock);
6985                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
6986         }
6987 }
6988
6989 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6990 {
6991         struct tg3 *tp = netdev_priv(dev);
6992
6993         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6994                 features &= ~NETIF_F_ALL_TSO;
6995
6996         return features;
6997 }
6998
6999 static int tg3_set_features(struct net_device *dev, u32 features)
7000 {
7001         u32 changed = dev->features ^ features;
7002
7003         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7004                 tg3_set_loopback(dev, features);
7005
7006         return 0;
7007 }
7008
7009 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
7010                                int new_mtu)
7011 {
7012         dev->mtu = new_mtu;
7013
7014         if (new_mtu > ETH_DATA_LEN) {
7015                 if (tg3_flag(tp, 5780_CLASS)) {
7016                         netdev_update_features(dev);
7017                         tg3_flag_clear(tp, TSO_CAPABLE);
7018                 } else {
7019                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
7020                 }
7021         } else {
7022                 if (tg3_flag(tp, 5780_CLASS)) {
7023                         tg3_flag_set(tp, TSO_CAPABLE);
7024                         netdev_update_features(dev);
7025                 }
7026                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
7027         }
7028 }
7029
/* net_device change_mtu hook.  Validates the requested MTU, then (if
 * the interface is up) performs a full stop/halt/restart cycle so the
 * hardware is reconfigured for the new frame size.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        err = tg3_restart_hw(tp, 0);

        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        /* tg3_phy_start() must run without tp->lock held. */
        if (!err)
                tg3_phy_start(tp);

        return err;
}
7068
7069 static void tg3_rx_prodring_free(struct tg3 *tp,
7070                                  struct tg3_rx_prodring_set *tpr)
7071 {
7072         int i;
7073
7074         if (tpr != &tp->napi[0].prodring) {
7075                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7076                      i = (i + 1) & tp->rx_std_ring_mask)
7077                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7078                                         tp->rx_pkt_map_sz);
7079
7080                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7081                         for (i = tpr->rx_jmb_cons_idx;
7082                              i != tpr->rx_jmb_prod_idx;
7083                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7084                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7085                                                 TG3_RX_JMB_MAP_SZ);
7086                         }
7087                 }
7088
7089                 return;
7090         }
7091
7092         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7093                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7094                                 tp->rx_pkt_map_sz);
7095
7096         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7097                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7098                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7099                                         TG3_RX_JMB_MAP_SZ);
7100         }
7101 }
7102
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM if not even one rx buffer could be
 * posted (partial success shrinks tp->rx_pending / tp->rx_jumbo_pending
 * instead of failing).
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	/* Start both standard and jumbo rings from index 0. */
	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Only napi[0]'s prodring gets descriptors and skbs below;
	 * secondary per-vector prodrings just clear their bookkeeping
	 * arrays (they appear to shadow ring 0's buffers — see the
	 * rx path for confirmation).
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips use the standard ring for jumbo frames too
	 * (they get no separate jumbo ring below), so size the standard
	 * ring buffers for jumbo DMA when the MTU requires it.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			/* Partial allocation: run with a smaller ring. */
			tp->rx_pending = i;
			break;
		}
	}

	/* Jumbo ring setup only for jumbo-capable, non-5780-class chips. */
	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			/* Partial allocation: run with a smaller jumbo ring. */
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	/* Could not post even one buffer; unwind everything. */
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
7205
/* Release a producer ring set: the host-side buffer bookkeeping arrays
 * and, when present, the DMA-coherent descriptor rings.  Safe to call
 * on a partially initialized set (kfree(NULL) is a no-op and the rings
 * are NULL-checked), so tg3_rx_prodring_init() uses this on its error
 * path as well.
 */
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		/* NULL the pointer to guard against a double free. */
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
7224
/* Allocate one rx producer ring set: the host bookkeeping array and the
 * DMA-coherent standard descriptor ring, plus — on jumbo-capable,
 * non-5780-class chips — the jumbo pair as well.
 *
 * Returns 0 on success or -ENOMEM; on failure everything already
 * allocated is released via tg3_rx_prodring_fini().
 */
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	/* fini handles a partially-constructed set (NULL members skipped) */
	tg3_rx_prodring_fini(tp, tpr);
	return -ENOMEM;
}
7260
7261 /* Free up pending packets in all rx/tx rings.
7262  *
7263  * The chip has been shut down and the driver detached from
7264  * the networking, so no interrupts or new tx packets will
7265  * end up in the driver.  tp->{tx,}lock is not held and we are not
7266  * in an interrupt context and thus may sleep.
7267  */
7268 static void tg3_free_rings(struct tg3 *tp)
7269 {
7270         int i, j;
7271
7272         for (j = 0; j < tp->irq_cnt; j++) {
7273                 struct tg3_napi *tnapi = &tp->napi[j];
7274
7275                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7276
7277                 if (!tnapi->tx_buffers)
7278                         continue;
7279
7280                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7281                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7282
7283                         if (!skb)
7284                                 continue;
7285
7286                         tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
7287
7288                         dev_kfree_skb_any(skb);
7289                 }
7290         }
7291 }
7292
7293 /* Initialize tx/rx rings for packet processing.
7294  *
7295  * The chip has been shut down and the driver detached from
7296  * the networking, so no interrupts or new tx packets will
7297  * end up in the driver.  tp->{tx,}lock are held and thus
7298  * we may not sleep.
7299  */
7300 static int tg3_init_rings(struct tg3 *tp)
7301 {
7302         int i;
7303
7304         /* Free up all the SKBs. */
7305         tg3_free_rings(tp);
7306
7307         for (i = 0; i < tp->irq_cnt; i++) {
7308                 struct tg3_napi *tnapi = &tp->napi[i];
7309
7310                 tnapi->last_tag = 0;
7311                 tnapi->last_irq_tag = 0;
7312                 tnapi->hw_status->status = 0;
7313                 tnapi->hw_status->status_tag = 0;
7314                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7315
7316                 tnapi->tx_prod = 0;
7317                 tnapi->tx_cons = 0;
7318                 if (tnapi->tx_ring)
7319                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7320
7321                 tnapi->rx_rcb_ptr = 0;
7322                 if (tnapi->rx_rcb)
7323                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7324
7325                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7326                         tg3_free_rings(tp);
7327                         return -ENOMEM;
7328                 }
7329         }
7330
7331         return 0;
7332 }
7333
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases every DMA-coherent/kmalloc resource obtained by
 * tg3_alloc_consistent().  Each member is NULL-checked (and NULLed
 * after freeing), so this is safe to call from the error path of a
 * partially completed allocation.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
				tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		/* kfree(NULL) is a no-op; no guard needed. */
		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		/* Status block is freed last within each vector. */
		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	/* Shared hardware statistics block. */
	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
7378
7379 /*
7380  * Must not be invoked with interrupt sources disabled and
7381  * the hardware shutdown down.  Can sleep.
7382  */
7383 static int tg3_alloc_consistent(struct tg3 *tp)
7384 {
7385         int i;
7386
7387         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7388                                           sizeof(struct tg3_hw_stats),
7389                                           &tp->stats_mapping,
7390                                           GFP_KERNEL);
7391         if (!tp->hw_stats)
7392                 goto err_out;
7393
7394         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7395
7396         for (i = 0; i < tp->irq_cnt; i++) {
7397                 struct tg3_napi *tnapi = &tp->napi[i];
7398                 struct tg3_hw_status *sblk;
7399
7400                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7401                                                       TG3_HW_STATUS_SIZE,
7402                                                       &tnapi->status_mapping,
7403                                                       GFP_KERNEL);
7404                 if (!tnapi->hw_status)
7405                         goto err_out;
7406
7407                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7408                 sblk = tnapi->hw_status;
7409
7410                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7411                         goto err_out;
7412
7413                 /* If multivector TSS is enabled, vector 0 does not handle
7414                  * tx interrupts.  Don't allocate any resources for it.
7415                  */
7416                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7417                     (i && tg3_flag(tp, ENABLE_TSS))) {
7418                         tnapi->tx_buffers = kzalloc(
7419                                                sizeof(struct tg3_tx_ring_info) *
7420                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7421                         if (!tnapi->tx_buffers)
7422                                 goto err_out;
7423
7424                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7425                                                             TG3_TX_RING_BYTES,
7426                                                         &tnapi->tx_desc_mapping,
7427                                                             GFP_KERNEL);
7428                         if (!tnapi->tx_ring)
7429                                 goto err_out;
7430                 }
7431
7432                 /*
7433                  * When RSS is enabled, the status block format changes
7434                  * slightly.  The "rx_jumbo_consumer", "reserved",
7435                  * and "rx_mini_consumer" members get mapped to the
7436                  * other three rx return ring producer indexes.
7437                  */
7438                 switch (i) {
7439                 default:
7440                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7441                         break;
7442                 case 2:
7443                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7444                         break;
7445                 case 3:
7446                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7447                         break;
7448                 case 4:
7449                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7450                         break;
7451                 }
7452
7453                 /*
7454                  * If multivector RSS is enabled, vector 0 does not handle
7455                  * rx or tx interrupts.  Don't allocate any resources for it.
7456                  */
7457                 if (!i && tg3_flag(tp, ENABLE_RSS))
7458                         continue;
7459
7460                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7461                                                    TG3_RX_RCB_RING_BYTES(tp),
7462                                                    &tnapi->rx_rcb_mapping,
7463                                                    GFP_KERNEL);
7464                 if (!tnapi->rx_rcb)
7465                         goto err_out;
7466
7467                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7468         }
7469
7470         return 0;
7471
7472 err_out:
7473         tg3_free_consistent(tp);
7474         return -ENOMEM;
7475 }
7476
7477 #define MAX_WAIT_CNT 1000
7478
/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * @ofs:        register offset of the block's mode register
 * @enable_bit: the enable bit to clear and poll
 * @silent:     suppress the error message AND the -ENODEV on timeout
 *
 * Returns 0 on success (or silent timeout), -ENODEV on a reported
 * timeout.  NOTE: when @silent is set, a timeout is ignored and 0 is
 * returned.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	/* Clear the enable bit and flush the write (tw32_f). */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* Poll up to MAX_WAIT_CNT * 100us for the block to stop. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
7524
/* tp->lock is held.
 *
 * Quiesce the chip: disable interrupts, stop the MAC receiver, then
 * shut down the rx pipeline, the tx/DMA pipeline, the MAC transmitter,
 * and finally the host coalescing / memory blocks — in that order.
 * Individual stop failures are OR-ed into the return value rather than
 * aborting the sequence, so as much of the chip as possible is stopped.
 * Returns 0 or a negative error if any block failed to stop.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop the MAC receiver first so no new frames enter the chip. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Stop the rx pipeline blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Stop the tx/DMA pipeline blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and poll for it to stop. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the flow-through queue reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Wipe the status blocks and statistics now that the chip is quiet. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
7590
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The GRC core clock reset can clear the PCI memory-enable bit
	 * on some chips (see tg3_chip_reset()); stash PCI_COMMAND so
	 * tg3_restore_pci_state() can put it back afterwards.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
7596
/* Restore PCI state after chip reset.
 *
 * Re-arms indirect register access, rebuilds TG3PCI_PCISTATE, restores
 * the saved PCI_COMMAND word, the PCIe read request size (or the
 * cacheline/latency-timer pair for conventional PCI), clears the PCI-X
 * relaxed-ordering bit, and re-enables MSI on 5780-class chips where
 * reset clears it.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command word saved by tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tg3_flag(tp, PCI_EXPRESS))
			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
		else {
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
7661
/* tp->lock is held.
 *
 * Perform a GRC core-clock reset and bring the chip back to a sane
 * post-reset state: save/restore PCI config, clear PCIe error status,
 * re-enable the memory arbiter, restore the MAC mode, wait for the
 * firmware, and reprobe the ASF enable state.  Returns 0 on success or
 * the error from tg3_poll_fw().
 */
static int tg3_chip_reset(struct tg3 *tp)
{
	u32 val;
	void (*write_op)(struct tg3 *, u32, u32);
	int i, err;

	tg3_nvram_lock(tp);

	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

	/* No matching tg3_nvram_unlock() after this because
	 * chip reset below will undo the nvram lock.
	 */
	tp->nvram_lock_cnt = 0;

	/* GRC_MISC_CFG core clock reset will clear the memory
	 * enable bit in PCI register 4 and the MSI enable bit
	 * on some chips, so we save relevant registers here.
	 */
	tg3_save_pci_state(tp);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
	    tg3_flag(tp, 5755_PLUS))
		tw32(GRC_FASTBOOT_PC, 0);

	/*
	 * We must avoid the readl() that normally takes place.
	 * It locks machines, causes machine checks, and other
	 * fun things.  So, temporarily disable the 5701
	 * hardware workaround, while we do the reset.
	 */
	write_op = tp->write32;
	if (write_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;

	/* Prevent the irq handler from reading or writing PCI registers
	 * during chip reset when the memory enable bit in the PCI command
	 * register may be cleared.  The chip does not generate interrupt
	 * at this time, but the irq handler may still be called due to irq
	 * sharing or irqpoll.
	 */
	tg3_flag_set(tp, CHIP_RESETTING);
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status) {
			tnapi->hw_status->status = 0;
			tnapi->hw_status->status_tag = 0;
		}
		tnapi->last_tag = 0;
		tnapi->last_irq_tag = 0;
	}
	/* Make the flag/tag updates visible before waiting out any
	 * handler that may already be running.
	 */
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
	}

	/* do the reset */
	val = GRC_MISC_CFG_CORECLK_RESET;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Force PCIe 1.0a mode */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS) &&
		    tr32(TG3_PCIE_PHY_TSTCTL) ==
		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
			tw32(GRC_MISC_CFG, (1 << 29));
			val |= (1 << 29);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
		tw32(GRC_VCPU_EXT_CTRL,
		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
	}

	/* Manage gphy power for all CPMU absent PCIe devices. */
	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

	tw32(GRC_MISC_CFG, val);

	/* restore 5701 hardware bug workaround write method */
	tp->write32 = write_op;

	/* Unfortunately, we have to delay before the PCI read back.
	 * Some 575X chips even will not respond to a PCI cfg access
	 * when the reset command is given to the chip.
	 *
	 * How do these hardware designers expect things to work
	 * properly if the PCI write is posted for a long period
	 * of time?  It is always necessary to have some method by
	 * which a register read back can occur to push the write
	 * out which does the reset.
	 *
	 * For most tg3 variants the trick below was working.
	 * Ho hum...
	 */
	udelay(120);

	/* Flush PCI posted writes.  The normal MMIO registers
	 * are inaccessible at this time so this is the only
	 * way to make this reliably (actually, this is no longer
	 * the case, see above).  I tried to use indirect
	 * register read/write but this upset some 5701 variants.
	 */
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

	udelay(120);

	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
		u16 val16;

		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
			int i;
			u32 cfg_val;

			/* Wait for link training to complete.  */
			for (i = 0; i < 5000; i++)
				udelay(100);

			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
			pci_write_config_dword(tp->pdev, 0xc4,
					       cfg_val | (1 << 15));
		}

		/* Clear the "no snoop" and "relaxed ordering" bits. */
		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				     &val16);
		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
			   PCI_EXP_DEVCTL_NOSNOOP_EN);
		/*
		 * Older PCIe devices only support the 128 byte
		 * MPS setting.  Enforce the restriction.
		 */
		if (!tg3_flag(tp, CPMU_PRESENT))
			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
				      val16);

		pcie_set_readrq(tp->pdev, tp->pcie_readrq);

		/* Clear error status */
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
				      PCI_EXP_DEVSTA_CED |
				      PCI_EXP_DEVSTA_NFED |
				      PCI_EXP_DEVSTA_FED |
				      PCI_EXP_DEVSTA_URD);
	}

	tg3_restore_pci_state(tp);

	tg3_flag_clear(tp, CHIP_RESETTING);
	tg3_flag_clear(tp, ERROR_PROCESSED);

	/* Re-enable the memory arbiter (reset cleared it). */
	val = 0;
	if (tg3_flag(tp, 5780_CLASS))
		val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
		tg3_stop_fw(tp);
		tw32(0x5000, 0x400);
	}

	tw32(GRC_MODE, tp->grc_mode);

	/* Chip-revision-specific fixup; register semantics undocumented
	 * here (magic offset 0xc4, bit 15).
	 */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
		val = tr32(0xc4);

		tw32(0xc4, val | (1 << 15));
	}

	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
	}

	/* Restore MAC port mode according to the PHY type. */
	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
		val = tp->mac_mode;
	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
		val = tp->mac_mode;
	} else
		val = 0;

	tw32_f(MAC_MODE, val);
	udelay(40);

	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

	err = tg3_poll_fw(tp);
	if (err)
		return err;

	tg3_mdio_start(tp);

	/* Another undocumented magic register (0x7c00, bit 25). */
	if (tg3_flag(tp, PCI_EXPRESS) &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
	    !tg3_flag(tp, 57765_PLUS)) {
		val = tr32(0x7c00);

		tw32(0x7c00, val | (1 << 25));
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
		val = tr32(TG3_CPMU_CLCK_ORIDE);
		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
	}

	/* Reprobe ASF enable state.  */
	tg3_flag_clear(tp, ENABLE_ASF);
	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			tp->last_event_jiffies = jiffies;
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}
	}

	return 0;
}
7907
/* Stop the device and reset the chip.  Caller must hold tp->lock.
 *
 * @kind: RESET_KIND_* value written into the firmware signature slots.
 * @silent: suppress abort-path warnings in tg3_abort_hw().
 *
 * Returns 0 on success or the error from tg3_chip_reset().
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
	int err;

	tg3_stop_fw(tp);
	tg3_write_sig_pre_reset(tp, kind);
	tg3_abort_hw(tp, silent);

	err = tg3_chip_reset(tp);

	/* Restore the MAC address and write the post-reset firmware
	 * signatures even when the chip reset reported an error.
	 */
	__tg3_set_mac_addr(tp, 0);
	tg3_write_sig_legacy(tp, kind);
	tg3_write_sig_post_reset(tp, kind);

	return err;
}
7930
7931 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7932 {
7933         struct tg3 *tp = netdev_priv(dev);
7934         struct sockaddr *addr = p;
7935         int err = 0, skip_mac_1 = 0;
7936
7937         if (!is_valid_ether_addr(addr->sa_data))
7938                 return -EINVAL;
7939
7940         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7941
7942         if (!netif_running(dev))
7943                 return 0;
7944
7945         if (tg3_flag(tp, ENABLE_ASF)) {
7946                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7947
7948                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7949                 addr0_low = tr32(MAC_ADDR_0_LOW);
7950                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7951                 addr1_low = tr32(MAC_ADDR_1_LOW);
7952
7953                 /* Skip MAC addr 1 if ASF is using it. */
7954                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7955                     !(addr1_high == 0 && addr1_low == 0))
7956                         skip_mac_1 = 1;
7957         }
7958         spin_lock_bh(&tp->lock);
7959         __tg3_set_mac_addr(tp, skip_mac_1);
7960         spin_unlock_bh(&tp->lock);
7961
7962         return err;
7963 }
7964
7965 /* tp->lock is held. */
7966 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7967                            dma_addr_t mapping, u32 maxlen_flags,
7968                            u32 nic_addr)
7969 {
7970         tg3_write_mem(tp,
7971                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7972                       ((u64) mapping >> 32));
7973         tg3_write_mem(tp,
7974                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7975                       ((u64) mapping & 0xffffffff));
7976         tg3_write_mem(tp,
7977                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7978                        maxlen_flags);
7979
7980         if (!tg3_flag(tp, 5705_PLUS))
7981                 tg3_write_mem(tp,
7982                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7983                               nic_addr);
7984 }
7985
7986 static void __tg3_set_rx_mode(struct net_device *);
/* Program the host coalescing engine from the ethtool parameters in
 * @ec.  tp->lock is held.
 *
 * When TSS (resp. RSS) is enabled, the default TX (resp. RX) coalescing
 * registers are zeroed and the per-vector register groups are used
 * instead; unused vector groups are zeroed at the end.
 */
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
	int i;

	if (!tg3_flag(tp, ENABLE_TSS)) {
		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_TXCOL_TICKS, 0);
		tw32(HOSTCC_TXMAX_FRAMES, 0);
		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
	}

	if (!tg3_flag(tp, ENABLE_RSS)) {
		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
	} else {
		tw32(HOSTCC_RXCOL_TICKS, 0);
		tw32(HOSTCC_RXMAX_FRAMES, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
	}

	/* IRQ-tick and statistics-block coalescing only exist pre-5705. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 val = ec->stats_block_coalesce_usecs;

		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

		/* Stop statistics block DMA while the link is down. */
		if (!netif_carrier_ok(tp->dev))
			val = 0;

		tw32(HOSTCC_STAT_COAL_TICKS, val);
	}

	/* Per-vector register groups are 0x18 bytes apart; program the
	 * groups belonging to the active extra vectors.
	 */
	for (i = 0; i < tp->irq_cnt - 1; i++) {
		u32 reg;

		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
		tw32(reg, ec->rx_coalesce_usecs);
		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames);
		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
		tw32(reg, ec->rx_max_coalesced_frames_irq);

		if (tg3_flag(tp, ENABLE_TSS)) {
			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
			tw32(reg, ec->tx_coalesce_usecs);
			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames);
			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
			tw32(reg, ec->tx_max_coalesced_frames_irq);
		}
	}

	/* Zero the register groups of the remaining, unused vectors. */
	for (; i < tp->irq_max - 1; i++) {
		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

		if (tg3_flag(tp, ENABLE_TSS)) {
			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
		}
	}
}
8055
/* Reset all ring control blocks, mailboxes and status blocks to a
 * clean post-chip-reset state, then reprogram the BDINFO entries for
 * every active NAPI vector.  tp->lock is held.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first.  The number of send
	 * ring control blocks in NIC SRAM depends on the chip family.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	/* Reset the stuck-MSI detection state for vector 0. */
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, vector 0 owns the single TX producer mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Reprogram vector 0's TX and RX-return ring control blocks. */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	/* Now do the same for the remaining vectors; their status block
	 * address registers start at HOSTCC_STATBLCK_RING1, 8 bytes apart.
	 */
	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
8183
8184 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8185 {
8186         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8187
8188         if (!tg3_flag(tp, 5750_PLUS) ||
8189             tg3_flag(tp, 5780_CLASS) ||
8190             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8191             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8192                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8193         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8194                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8195                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8196         else
8197                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8198
8199         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8200         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8201
8202         val = min(nic_rep_thresh, host_rep_thresh);
8203         tw32(RCVBDI_STD_THRESH, val);
8204
8205         if (tg3_flag(tp, 57765_PLUS))
8206                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8207
8208         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8209                 return;
8210
8211         if (!tg3_flag(tp, 5705_PLUS))
8212                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8213         else
8214                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8215
8216         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8217
8218         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8219         tw32(RCVBDI_JUMBO_THRESH, val);
8220
8221         if (tg3_flag(tp, 57765_PLUS))
8222                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8223 }
8224
8225 /* tp->lock is held. */
8226 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8227 {
8228         u32 val, rdmac_mode;
8229         int i, err, limit;
8230         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8231
8232         tg3_disable_ints(tp);
8233
8234         tg3_stop_fw(tp);
8235
8236         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8237
8238         if (tg3_flag(tp, INIT_COMPLETE))
8239                 tg3_abort_hw(tp, 1);
8240
8241         /* Enable MAC control of LPI */
8242         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8243                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8244                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8245                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8246
8247                 tw32_f(TG3_CPMU_EEE_CTRL,
8248                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8249
8250                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8251                       TG3_CPMU_EEEMD_LPI_IN_TX |
8252                       TG3_CPMU_EEEMD_LPI_IN_RX |
8253                       TG3_CPMU_EEEMD_EEE_ENABLE;
8254
8255                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8256                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8257
8258                 if (tg3_flag(tp, ENABLE_APE))
8259                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8260
8261                 tw32_f(TG3_CPMU_EEE_MODE, val);
8262
8263                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8264                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8265                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8266
8267                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8268                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8269                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8270         }
8271
8272         if (reset_phy)
8273                 tg3_phy_reset(tp);
8274
8275         err = tg3_chip_reset(tp);
8276         if (err)
8277                 return err;
8278
8279         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8280
8281         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8282                 val = tr32(TG3_CPMU_CTRL);
8283                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8284                 tw32(TG3_CPMU_CTRL, val);
8285
8286                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8287                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8288                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8289                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8290
8291                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8292                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8293                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8294                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8295
8296                 val = tr32(TG3_CPMU_HST_ACC);
8297                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8298                 val |= CPMU_HST_ACC_MACCLK_6_25;
8299                 tw32(TG3_CPMU_HST_ACC, val);
8300         }
8301
8302         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8303                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8304                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8305                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8306                 tw32(PCIE_PWR_MGMT_THRESH, val);
8307
8308                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8309                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8310
8311                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8312
8313                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8314                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8315         }
8316
8317         if (tg3_flag(tp, L1PLLPD_EN)) {
8318                 u32 grc_mode = tr32(GRC_MODE);
8319
8320                 /* Access the lower 1K of PL PCIE block registers. */
8321                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8322                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8323
8324                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8325                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8326                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8327
8328                 tw32(GRC_MODE, grc_mode);
8329         }
8330
8331         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8332                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8333                         u32 grc_mode = tr32(GRC_MODE);
8334
8335                         /* Access the lower 1K of PL PCIE block registers. */
8336                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8337                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8338
8339                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8340                                    TG3_PCIE_PL_LO_PHYCTL5);
8341                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8342                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8343
8344                         tw32(GRC_MODE, grc_mode);
8345                 }
8346
8347                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8348                         u32 grc_mode = tr32(GRC_MODE);
8349
8350                         /* Access the lower 1K of DL PCIE block registers. */
8351                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8352                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8353
8354                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8355                                    TG3_PCIE_DL_LO_FTSMAX);
8356                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8357                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8358                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8359
8360                         tw32(GRC_MODE, grc_mode);
8361                 }
8362
8363                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8364                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8365                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8366                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8367         }
8368
8369         /* This works around an issue with Athlon chipsets on
8370          * B3 tigon3 silicon.  This bit has no effect on any
8371          * other revision.  But do not set this on PCI Express
8372          * chips and don't even touch the clocks if the CPMU is present.
8373          */
8374         if (!tg3_flag(tp, CPMU_PRESENT)) {
8375                 if (!tg3_flag(tp, PCI_EXPRESS))
8376                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8377                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8378         }
8379
8380         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8381             tg3_flag(tp, PCIX_MODE)) {
8382                 val = tr32(TG3PCI_PCISTATE);
8383                 val |= PCISTATE_RETRY_SAME_DMA;
8384                 tw32(TG3PCI_PCISTATE, val);
8385         }
8386
8387         if (tg3_flag(tp, ENABLE_APE)) {
8388                 /* Allow reads and writes to the
8389                  * APE register and memory space.
8390                  */
8391                 val = tr32(TG3PCI_PCISTATE);
8392                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8393                        PCISTATE_ALLOW_APE_SHMEM_WR |
8394                        PCISTATE_ALLOW_APE_PSPACE_WR;
8395                 tw32(TG3PCI_PCISTATE, val);
8396         }
8397
8398         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8399                 /* Enable some hw fixes.  */
8400                 val = tr32(TG3PCI_MSI_DATA);
8401                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8402                 tw32(TG3PCI_MSI_DATA, val);
8403         }
8404
8405         /* Descriptor ring init may make accesses to the
8406          * NIC SRAM area to setup the TX descriptors, so we
8407          * can only do this after the hardware has been
8408          * successfully reset.
8409          */
8410         err = tg3_init_rings(tp);
8411         if (err)
8412                 return err;
8413
8414         if (tg3_flag(tp, 57765_PLUS)) {
8415                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8416                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8417                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8418                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8419                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8420                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8421                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8422                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8423         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8424                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8425                 /* This value is determined during the probe time DMA
8426                  * engine test, tg3_test_dma.
8427                  */
8428                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8429         }
8430
8431         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8432                           GRC_MODE_4X_NIC_SEND_RINGS |
8433                           GRC_MODE_NO_TX_PHDR_CSUM |
8434                           GRC_MODE_NO_RX_PHDR_CSUM);
8435         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8436
8437         /* Pseudo-header checksum is done by hardware logic and not
8438          * the offload processers, so make the chip do the pseudo-
8439          * header checksums on receive.  For transmit it is more
8440          * convenient to do the pseudo-header checksum in software
8441          * as Linux does that on transmit for us in all cases.
8442          */
8443         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8444
8445         tw32(GRC_MODE,
8446              tp->grc_mode |
8447              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8448
8449         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
8450         val = tr32(GRC_MISC_CFG);
8451         val &= ~0xff;
8452         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8453         tw32(GRC_MISC_CFG, val);
8454
8455         /* Initialize MBUF/DESC pool. */
8456         if (tg3_flag(tp, 5750_PLUS)) {
8457                 /* Do nothing.  */
8458         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8459                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8460                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8461                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8462                 else
8463                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8464                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8465                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8466         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8467                 int fw_len;
8468
8469                 fw_len = tp->fw_len;
8470                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8471                 tw32(BUFMGR_MB_POOL_ADDR,
8472                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8473                 tw32(BUFMGR_MB_POOL_SIZE,
8474                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8475         }
8476
8477         if (tp->dev->mtu <= ETH_DATA_LEN) {
8478                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8479                      tp->bufmgr_config.mbuf_read_dma_low_water);
8480                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8481                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8482                 tw32(BUFMGR_MB_HIGH_WATER,
8483                      tp->bufmgr_config.mbuf_high_water);
8484         } else {
8485                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8486                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8487                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8488                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8489                 tw32(BUFMGR_MB_HIGH_WATER,
8490                      tp->bufmgr_config.mbuf_high_water_jumbo);
8491         }
8492         tw32(BUFMGR_DMA_LOW_WATER,
8493              tp->bufmgr_config.dma_low_water);
8494         tw32(BUFMGR_DMA_HIGH_WATER,
8495              tp->bufmgr_config.dma_high_water);
8496
8497         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8498         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8499                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8500         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8501             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8502             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8503                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8504         tw32(BUFMGR_MODE, val);
8505         for (i = 0; i < 2000; i++) {
8506                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8507                         break;
8508                 udelay(10);
8509         }
8510         if (i >= 2000) {
8511                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8512                 return -ENODEV;
8513         }
8514
8515         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8516                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8517
8518         tg3_setup_rxbd_thresholds(tp);
8519
8520         /* Initialize TG3_BDINFO's at:
8521          *  RCVDBDI_STD_BD:     standard eth size rx ring
8522          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8523          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8524          *
8525          * like so:
8526          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8527          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8528          *                              ring attribute flags
8529          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8530          *
8531          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8532          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8533          *
8534          * The size of each ring is fixed in the firmware, but the location is
8535          * configurable.
8536          */
8537         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8538              ((u64) tpr->rx_std_mapping >> 32));
8539         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8540              ((u64) tpr->rx_std_mapping & 0xffffffff));
8541         if (!tg3_flag(tp, 5717_PLUS))
8542                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8543                      NIC_SRAM_RX_BUFFER_DESC);
8544
8545         /* Disable the mini ring */
8546         if (!tg3_flag(tp, 5705_PLUS))
8547                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8548                      BDINFO_FLAGS_DISABLED);
8549
8550         /* Program the jumbo buffer descriptor ring control
8551          * blocks on those devices that have them.
8552          */
8553         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8554             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8555
8556                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8557                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8558                              ((u64) tpr->rx_jmb_mapping >> 32));
8559                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8560                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8561                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8562                               BDINFO_FLAGS_MAXLEN_SHIFT;
8563                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8564                              val | BDINFO_FLAGS_USE_EXT_RECV);
8565                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8566                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8567                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8568                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8569                 } else {
8570                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8571                              BDINFO_FLAGS_DISABLED);
8572                 }
8573
8574                 if (tg3_flag(tp, 57765_PLUS)) {
8575                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8576                                 val = TG3_RX_STD_MAX_SIZE_5700;
8577                         else
8578                                 val = TG3_RX_STD_MAX_SIZE_5717;
8579                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8580                         val |= (TG3_RX_STD_DMA_SZ << 2);
8581                 } else
8582                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8583         } else
8584                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8585
8586         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8587
8588         tpr->rx_std_prod_idx = tp->rx_pending;
8589         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8590
8591         tpr->rx_jmb_prod_idx =
8592                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8593         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8594
8595         tg3_rings_reset(tp);
8596
8597         /* Initialize MAC address and backoff seed. */
8598         __tg3_set_mac_addr(tp, 0);
8599
8600         /* MTU + ethernet header + FCS + optional VLAN tag */
8601         tw32(MAC_RX_MTU_SIZE,
8602              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8603
8604         /* The slot time is changed by tg3_setup_phy if we
8605          * run at gigabit with half duplex.
8606          */
8607         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8608               (6 << TX_LENGTHS_IPG_SHIFT) |
8609               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8610
8611         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8612                 val |= tr32(MAC_TX_LENGTHS) &
8613                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8614                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8615
8616         tw32(MAC_TX_LENGTHS, val);
8617
8618         /* Receive rules. */
8619         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8620         tw32(RCVLPC_CONFIG, 0x0181);
8621
8622         /* Calculate RDMAC_MODE setting early, we need it to determine
8623          * the RCVLPC_STATE_ENABLE mask.
8624          */
8625         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8626                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8627                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8628                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8629                       RDMAC_MODE_LNGREAD_ENAB);
8630
8631         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8632                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8633
8634         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8635             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8636             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8637                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8638                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8639                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8640
8641         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8642             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8643                 if (tg3_flag(tp, TSO_CAPABLE) &&
8644                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8645                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8646                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8647                            !tg3_flag(tp, IS_5788)) {
8648                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8649                 }
8650         }
8651
8652         if (tg3_flag(tp, PCI_EXPRESS))
8653                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8654
8655         if (tg3_flag(tp, HW_TSO_1) ||
8656             tg3_flag(tp, HW_TSO_2) ||
8657             tg3_flag(tp, HW_TSO_3))
8658                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8659
8660         if (tg3_flag(tp, 57765_PLUS) ||
8661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8662             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8663                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8664
8665         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8666                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8667
8668         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8669             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8670             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8671             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8672             tg3_flag(tp, 57765_PLUS)) {
8673                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8674                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8675                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8676                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8677                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8678                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8679                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8680                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8681                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8682                 }
8683                 tw32(TG3_RDMA_RSRVCTRL_REG,
8684                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8685         }
8686
8687         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8688             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8689                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8690                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8691                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8692                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8693         }
8694
8695         /* Receive/send statistics. */
8696         if (tg3_flag(tp, 5750_PLUS)) {
8697                 val = tr32(RCVLPC_STATS_ENABLE);
8698                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8699                 tw32(RCVLPC_STATS_ENABLE, val);
8700         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8701                    tg3_flag(tp, TSO_CAPABLE)) {
8702                 val = tr32(RCVLPC_STATS_ENABLE);
8703                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8704                 tw32(RCVLPC_STATS_ENABLE, val);
8705         } else {
8706                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8707         }
8708         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8709         tw32(SNDDATAI_STATSENAB, 0xffffff);
8710         tw32(SNDDATAI_STATSCTRL,
8711              (SNDDATAI_SCTRL_ENABLE |
8712               SNDDATAI_SCTRL_FASTUPD));
8713
8714         /* Setup host coalescing engine. */
8715         tw32(HOSTCC_MODE, 0);
8716         for (i = 0; i < 2000; i++) {
8717                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8718                         break;
8719                 udelay(10);
8720         }
8721
8722         __tg3_set_coalesce(tp, &tp->coal);
8723
8724         if (!tg3_flag(tp, 5705_PLUS)) {
8725                 /* Status/statistics block address.  See tg3_timer,
8726                  * the tg3_periodic_fetch_stats call there, and
8727                  * tg3_get_stats to see how this works for 5705/5750 chips.
8728                  */
8729                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8730                      ((u64) tp->stats_mapping >> 32));
8731                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8732                      ((u64) tp->stats_mapping & 0xffffffff));
8733                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8734
8735                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8736
8737                 /* Clear statistics and status block memory areas */
8738                 for (i = NIC_SRAM_STATS_BLK;
8739                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8740                      i += sizeof(u32)) {
8741                         tg3_write_mem(tp, i, 0);
8742                         udelay(40);
8743                 }
8744         }
8745
8746         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8747
8748         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8749         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8750         if (!tg3_flag(tp, 5705_PLUS))
8751                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8752
8753         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8754                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8755                 /* reset to prevent losing 1st rx packet intermittently */
8756                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8757                 udelay(10);
8758         }
8759
8760         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8761                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8762                         MAC_MODE_FHDE_ENABLE;
8763         if (tg3_flag(tp, ENABLE_APE))
8764                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8765         if (!tg3_flag(tp, 5705_PLUS) &&
8766             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8767             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8768                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8769         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8770         udelay(40);
8771
8772         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8773          * If TG3_FLAG_IS_NIC is zero, we should read the
8774          * register to preserve the GPIO settings for LOMs. The GPIOs,
8775          * whether used as inputs or outputs, are set by boot code after
8776          * reset.
8777          */
8778         if (!tg3_flag(tp, IS_NIC)) {
8779                 u32 gpio_mask;
8780
8781                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8782                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8783                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8784
8785                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8786                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8787                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8788
8789                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8790                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8791
8792                 tp->grc_local_ctrl &= ~gpio_mask;
8793                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8794
8795                 /* GPIO1 must be driven high for eeprom write protect */
8796                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8797                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8798                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8799         }
8800         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8801         udelay(100);
8802
8803         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8804                 val = tr32(MSGINT_MODE);
8805                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8806                 if (!tg3_flag(tp, 1SHOT_MSI))
8807                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8808                 tw32(MSGINT_MODE, val);
8809         }
8810
8811         if (!tg3_flag(tp, 5705_PLUS)) {
8812                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8813                 udelay(40);
8814         }
8815
8816         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8817                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8818                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8819                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8820                WDMAC_MODE_LNGREAD_ENAB);
8821
8822         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8823             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8824                 if (tg3_flag(tp, TSO_CAPABLE) &&
8825                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8826                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8827                         /* nothing */
8828                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8829                            !tg3_flag(tp, IS_5788)) {
8830                         val |= WDMAC_MODE_RX_ACCEL;
8831                 }
8832         }
8833
8834         /* Enable host coalescing bug fix */
8835         if (tg3_flag(tp, 5755_PLUS))
8836                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8837
8838         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8839                 val |= WDMAC_MODE_BURST_ALL_DATA;
8840
8841         tw32_f(WDMAC_MODE, val);
8842         udelay(40);
8843
8844         if (tg3_flag(tp, PCIX_MODE)) {
8845                 u16 pcix_cmd;
8846
8847                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8848                                      &pcix_cmd);
8849                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8850                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8851                         pcix_cmd |= PCI_X_CMD_READ_2K;
8852                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8853                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8854                         pcix_cmd |= PCI_X_CMD_READ_2K;
8855                 }
8856                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8857                                       pcix_cmd);
8858         }
8859
8860         tw32_f(RDMAC_MODE, rdmac_mode);
8861         udelay(40);
8862
8863         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8864         if (!tg3_flag(tp, 5705_PLUS))
8865                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8866
8867         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8868                 tw32(SNDDATAC_MODE,
8869                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8870         else
8871                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8872
8873         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8874         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8875         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8876         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8877                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8878         tw32(RCVDBDI_MODE, val);
8879         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8880         if (tg3_flag(tp, HW_TSO_1) ||
8881             tg3_flag(tp, HW_TSO_2) ||
8882             tg3_flag(tp, HW_TSO_3))
8883                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8884         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8885         if (tg3_flag(tp, ENABLE_TSS))
8886                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8887         tw32(SNDBDI_MODE, val);
8888         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8889
8890         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8891                 err = tg3_load_5701_a0_firmware_fix(tp);
8892                 if (err)
8893                         return err;
8894         }
8895
8896         if (tg3_flag(tp, TSO_CAPABLE)) {
8897                 err = tg3_load_tso_firmware(tp);
8898                 if (err)
8899                         return err;
8900         }
8901
8902         tp->tx_mode = TX_MODE_ENABLE;
8903
8904         if (tg3_flag(tp, 5755_PLUS) ||
8905             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8906                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8907
8908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8909                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8910                 tp->tx_mode &= ~val;
8911                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8912         }
8913
8914         tw32_f(MAC_TX_MODE, tp->tx_mode);
8915         udelay(100);
8916
8917         if (tg3_flag(tp, ENABLE_RSS)) {
8918                 int i = 0;
8919                 u32 reg = MAC_RSS_INDIR_TBL_0;
8920
8921                 if (tp->irq_cnt == 2) {
8922                         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8923                                 tw32(reg, 0x0);
8924                                 reg += 4;
8925                         }
8926                 } else {
8927                         u32 val;
8928
8929                         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8930                                 val = i % (tp->irq_cnt - 1);
8931                                 i++;
8932                                 for (; i % 8; i++) {
8933                                         val <<= 4;
8934                                         val |= (i % (tp->irq_cnt - 1));
8935                                 }
8936                                 tw32(reg, val);
8937                                 reg += 4;
8938                         }
8939                 }
8940
8941                 /* Setup the "secret" hash key. */
8942                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8943                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8944                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8945                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8946                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8947                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8948                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8949                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8950                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8951                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8952         }
8953
8954         tp->rx_mode = RX_MODE_ENABLE;
8955         if (tg3_flag(tp, 5755_PLUS))
8956                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8957
8958         if (tg3_flag(tp, ENABLE_RSS))
8959                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8960                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8961                                RX_MODE_RSS_IPV6_HASH_EN |
8962                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8963                                RX_MODE_RSS_IPV4_HASH_EN |
8964                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8965
8966         tw32_f(MAC_RX_MODE, tp->rx_mode);
8967         udelay(10);
8968
8969         tw32(MAC_LED_CTRL, tp->led_ctrl);
8970
8971         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8972         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8973                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8974                 udelay(10);
8975         }
8976         tw32_f(MAC_RX_MODE, tp->rx_mode);
8977         udelay(10);
8978
8979         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8980                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8981                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8982                         /* Set drive transmission level to 1.2V  */
8983                         /* only if the signal pre-emphasis bit is not set  */
8984                         val = tr32(MAC_SERDES_CFG);
8985                         val &= 0xfffff000;
8986                         val |= 0x880;
8987                         tw32(MAC_SERDES_CFG, val);
8988                 }
8989                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8990                         tw32(MAC_SERDES_CFG, 0x616000);
8991         }
8992
8993         /* Prevent chip from dropping frames when flow control
8994          * is enabled.
8995          */
8996         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8997                 val = 1;
8998         else
8999                 val = 2;
9000         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9001
9002         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9003             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9004                 /* Use hardware link auto-negotiation */
9005                 tg3_flag_set(tp, HW_AUTONEG);
9006         }
9007
9008         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9009             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9010                 u32 tmp;
9011
9012                 tmp = tr32(SERDES_RX_CTRL);
9013                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9014                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9015                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9016                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9017         }
9018
9019         if (!tg3_flag(tp, USE_PHYLIB)) {
9020                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9021                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9022                         tp->link_config.speed = tp->link_config.orig_speed;
9023                         tp->link_config.duplex = tp->link_config.orig_duplex;
9024                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9025                 }
9026
9027                 err = tg3_setup_phy(tp, 0);
9028                 if (err)
9029                         return err;
9030
9031                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9032                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9033                         u32 tmp;
9034
9035                         /* Clear CRC stats. */
9036                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9037                                 tg3_writephy(tp, MII_TG3_TEST1,
9038                                              tmp | MII_TG3_TEST1_CRC_EN);
9039                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9040                         }
9041                 }
9042         }
9043
9044         __tg3_set_rx_mode(tp->dev);
9045
9046         /* Initialize receive rules. */
9047         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9048         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9049         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9050         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9051
9052         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9053                 limit = 8;
9054         else
9055                 limit = 16;
9056         if (tg3_flag(tp, ENABLE_ASF))
9057                 limit -= 4;
9058         switch (limit) {
9059         case 16:
9060                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9061         case 15:
9062                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9063         case 14:
9064                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9065         case 13:
9066                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9067         case 12:
9068                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9069         case 11:
9070                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9071         case 10:
9072                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9073         case 9:
9074                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9075         case 8:
9076                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9077         case 7:
9078                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9079         case 6:
9080                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9081         case 5:
9082                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9083         case 4:
9084                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9085         case 3:
9086                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9087         case 2:
9088         case 1:
9089
9090         default:
9091                 break;
9092         }
9093
9094         if (tg3_flag(tp, ENABLE_APE))
9095                 /* Write our heartbeat update interval to APE. */
9096                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9097                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9098
9099         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9100
9101         return 0;
9102 }
9103
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	/* Put the core clocks into a usable configuration before any
	 * other register accesses.
	 */
	tg3_switch_clocks(tp);

	/* Reset the indirect memory window base to a known offset so
	 * later window-relative accesses start from zero.
	 */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Full hardware bring-up; reset_phy selects whether the PHY is
	 * reset as part of it.
	 */
	return tg3_reset_hw(tp, reset_phy);
}
9115
/* Read the 32-bit hardware counter REG and accumulate it into the
 * 64-bit software counter PSTAT (a low/high word pair).  Carry into
 * the high word is detected by the low word wrapping below the value
 * just added.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
9122
/* Fold the chip's 32-bit MAC and receive-list-placement statistics
 * registers into the 64-bit software counters in tp->hw_stats.
 * Called once per second from tg3_timer on 5705_PLUS chips (see the
 * status/statistics block comment in tg3_reset_hw and tg3_get_stats).
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	/* Skip the register reads while the link is down. */
	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side MAC statistics. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side MAC statistics. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* On 5717 and early 5719/5720 silicon, discards are
		 * derived from the mbuf low-watermark flow-attention bit
		 * instead of RCVLPC_IN_DISCARDS_CNT.  The bit is written
		 * back after sampling — presumably write-one-to-clear;
		 * TODO confirm against the register spec.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		/* Mirror the accumulated count into the dedicated
		 * low-watermark-hit counter.
		 */
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
9177
9178 static void tg3_chk_missed_msi(struct tg3 *tp)
9179 {
9180         u32 i;
9181
9182         for (i = 0; i < tp->irq_cnt; i++) {
9183                 struct tg3_napi *tnapi = &tp->napi[i];
9184
9185                 if (tg3_has_work(tnapi)) {
9186                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9187                             tnapi->last_tx_cons == tnapi->tx_cons) {
9188                                 if (tnapi->chk_msi_cnt < 1) {
9189                                         tnapi->chk_msi_cnt++;
9190                                         return;
9191                                 }
9192                                 tg3_msi(0, tnapi);
9193                         }
9194                 }
9195                 tnapi->chk_msi_cnt = 0;
9196                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9197                 tnapi->last_tx_cons = tnapi->tx_cons;
9198         }
9199 }
9200
/* Driver maintenance timer, re-armed every tp->timer_offset jiffies.
 * Under tp->lock it: recovers missed MSIs on affected chips, works
 * around the non-tagged status-block interrupt race, runs the
 * once-per-second link poll and statistics fetch, and sends the ASF
 * firmware heartbeat.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* An interrupt synchronization sequence is in progress; do no
	 * work this tick, just re-arm.
	 */
	if (tp->irq_sync)
		goto restart_timer;

	spin_lock(&tp->lock);

	/* These chip revisions can miss MSIs; check for stalled
	 * vectors and kick them.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			/* Status block updated but possibly not yet
			 * serviced: force an interrupt.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			/* Otherwise trigger an immediate coalescing
			 * update pass.
			 */
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		/* Write-DMA engine no longer enabled: the chip is
		 * wedged; schedule a full reset from process context
		 * and bail without re-arming here (the reset path
		 * restarts the timer via RESTART_TIMER).
		 */
		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			tg3_flag_set(tp, RESTART_TIMER);
			spin_unlock(&tp->lock);
			schedule_work(&tp->reset_task);
			return;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		/* Deferred EEE enable countdown. */
		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Poll MAC_STATUS for a link/PHY event instead
			 * of relying on the link-change interrupt.
			 */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up but the state changed... */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			/* ...or link was down and a signal appeared. */
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Toggle the port-mode bits off
					 * and back on before the PHY
					 * setup.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
9328
9329 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9330 {
9331         irq_handler_t fn;
9332         unsigned long flags;
9333         char *name;
9334         struct tg3_napi *tnapi = &tp->napi[irq_num];
9335
9336         if (tp->irq_cnt == 1)
9337                 name = tp->dev->name;
9338         else {
9339                 name = &tnapi->irq_lbl[0];
9340                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9341                 name[IFNAMSIZ-1] = 0;
9342         }
9343
9344         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9345                 fn = tg3_msi;
9346                 if (tg3_flag(tp, 1SHOT_MSI))
9347                         fn = tg3_msi_1shot;
9348                 flags = 0;
9349         } else {
9350                 fn = tg3_interrupt;
9351                 if (tg3_flag(tp, TAGGED_STATUS))
9352                         fn = tg3_interrupt_tagged;
9353                 flags = IRQF_SHARED;
9354         }
9355
9356         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9357 }
9358
/* Verify that the device can actually deliver an interrupt to the host.
 *
 * Temporarily replaces the production handler on vector 0 with
 * tg3_test_isr(), forces an immediate coalescing interrupt, and polls
 * (up to 5 x 10ms) for evidence that the ISR ran.  The production
 * handler is re-installed before returning.
 *
 * Returns 0 if an interrupt was observed, -EIO if none arrived,
 * -ENODEV if the interface is down, or a request_irq() errno.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
	struct tg3_napi *tnapi = &tp->napi[0];
	struct net_device *dev = tp->dev;
	int err, i, intr_ok = 0;
	u32 val;

	if (!netif_running(dev))
		return -ENODEV;

	tg3_disable_ints(tp);

	/* Drop the production handler so the test ISR can take the vector. */
	free_irq(tnapi->irq_vec, tnapi);

	/*
	 * Turn off MSI one shot mode.  Otherwise this test has no
	 * observable way to know whether the interrupt was delivered.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, val);
	}

	err = request_irq(tnapi->irq_vec, tg3_test_isr,
			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
	if (err)
		return err;

	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
	tg3_enable_ints(tp);

	/* Force the coalescing engine to fire an interrupt right now. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       tnapi->coal_now);

	for (i = 0; i < 5; i++) {
		u32 int_mbox, misc_host_ctrl;

		int_mbox = tr32_mailbox(tnapi->int_mbox);
		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

		/* Either a non-zero mailbox or the masked-PCI-INT bit
		 * proves the test ISR executed.
		 */
		if ((int_mbox != 0) ||
		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
			intr_ok = 1;
			break;
		}

		/* NOTE(review): on 57765+ parts, ack the last status tag if
		 * it moved, presumably to re-arm delivery — confirm against
		 * the chip's tagged-status documentation.
		 */
		if (tg3_flag(tp, 57765_PLUS) &&
		    tnapi->hw_status->status_tag != tnapi->last_tag)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		msleep(10);
	}

	tg3_disable_ints(tp);

	/* Swap the production handler back in. */
	free_irq(tnapi->irq_vec, tnapi);

	err = tg3_request_irq(tp, 0);

	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
9432
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * Runs tg3_test_interrupt() with SERR reporting suppressed; if MSI
 * delivery fails (-EIO) the driver falls back to legacy INTx and fully
 * resets the chip, since the failed MSI cycle may have ended in a
 * Master Abort.  Any other error is returned to the caller unchanged.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word (SERR included). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	/* Back to the legacy INTx line assigned by the PCI core. */
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
9493
/* Load the firmware blob named by tp->fw_needed and validate its header.
 *
 * On success tp->fw holds the blob, tp->fw_len its BSS-inclusive length,
 * and tp->fw_needed is cleared so the load is not repeated.  Returns 0,
 * -ENOENT if the firmware file could not be loaded, or -EINVAL if the
 * advertised length is inconsistent with the file size.
 */
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course
	 */

	/* Word 2 of the 12-byte (3-word) header is the full image length. */
	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
	/* The payload after the header is size - 12 bytes; a BSS-inclusive
	 * length smaller than that cannot be valid.
	 */
	if (tp->fw_len < (tp->fw->size - 12)) {
		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
			   tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}

	/* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
9524
/* Try to put the device into MSI-X mode with one rx vector per online CPU
 * plus one vector for link/error events.
 *
 * On success updates tp->irq_cnt, the per-napi vector numbers and the
 * real rx/tx queue counts, and sets ENABLE_RSS (and ENABLE_TSS on
 * 5719/5720).  Returns true on success, false to fall back to MSI.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc, cpus = num_online_cpus();
	/* VLA bounded by tp->irq_max (small, fixed per chip family). */
	struct msix_entry msix_ent[tp->irq_max];

	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;

	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

	/* Pre-zero every slot so unused vectors read as 0 below. */
	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Positive rc means fewer vectors are available; retry with
		 * exactly the number the PCI core said it can grant.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		/* 5719/5720 additionally support multiple tx queues. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
9579
/* Choose the interrupt mode (preference: MSI-X, then MSI, then legacy
 * INTx) and program the chip's message-interrupt mode register to match.
 * Any configuration that is not MSI-X falls through to single-vector,
 * single-queue bookkeeping at the defcfg label.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* MSI and INTx both run with one vector and one queue pair. */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
9613
9614 static void tg3_ints_fini(struct tg3 *tp)
9615 {
9616         if (tg3_flag(tp, USING_MSIX))
9617                 pci_disable_msix(tp->pdev);
9618         else if (tg3_flag(tp, USING_MSI))
9619                 pci_disable_msi(tp->pdev);
9620         tg3_flag_clear(tp, USING_MSI);
9621         tg3_flag_clear(tp, USING_MSIX);
9622         tg3_flag_clear(tp, ENABLE_RSS);
9623         tg3_flag_clear(tp, ENABLE_TSS);
9624 }
9625
9626 static int tg3_open(struct net_device *dev)
9627 {
9628         struct tg3 *tp = netdev_priv(dev);
9629         int i, err;
9630
9631         if (tp->fw_needed) {
9632                 err = tg3_request_firmware(tp);
9633                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9634                         if (err)
9635                                 return err;
9636                 } else if (err) {
9637                         netdev_warn(tp->dev, "TSO capability disabled\n");
9638                         tg3_flag_clear(tp, TSO_CAPABLE);
9639                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9640                         netdev_notice(tp->dev, "TSO capability restored\n");
9641                         tg3_flag_set(tp, TSO_CAPABLE);
9642                 }
9643         }
9644
9645         netif_carrier_off(tp->dev);
9646
9647         err = tg3_power_up(tp);
9648         if (err)
9649                 return err;
9650
9651         tg3_full_lock(tp, 0);
9652
9653         tg3_disable_ints(tp);
9654         tg3_flag_clear(tp, INIT_COMPLETE);
9655
9656         tg3_full_unlock(tp);
9657
9658         /*
9659          * Setup interrupts first so we know how
9660          * many NAPI resources to allocate
9661          */
9662         tg3_ints_init(tp);
9663
9664         /* The placement of this call is tied
9665          * to the setup and use of Host TX descriptors.
9666          */
9667         err = tg3_alloc_consistent(tp);
9668         if (err)
9669                 goto err_out1;
9670
9671         tg3_napi_init(tp);
9672
9673         tg3_napi_enable(tp);
9674
9675         for (i = 0; i < tp->irq_cnt; i++) {
9676                 struct tg3_napi *tnapi = &tp->napi[i];
9677                 err = tg3_request_irq(tp, i);
9678                 if (err) {
9679                         for (i--; i >= 0; i--)
9680                                 free_irq(tnapi->irq_vec, tnapi);
9681                         break;
9682                 }
9683         }
9684
9685         if (err)
9686                 goto err_out2;
9687
9688         tg3_full_lock(tp, 0);
9689
9690         err = tg3_init_hw(tp, 1);
9691         if (err) {
9692                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9693                 tg3_free_rings(tp);
9694         } else {
9695                 if (tg3_flag(tp, TAGGED_STATUS) &&
9696                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9697                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9698                         tp->timer_offset = HZ;
9699                 else
9700                         tp->timer_offset = HZ / 10;
9701
9702                 BUG_ON(tp->timer_offset > HZ);
9703                 tp->timer_counter = tp->timer_multiplier =
9704                         (HZ / tp->timer_offset);
9705                 tp->asf_counter = tp->asf_multiplier =
9706                         ((HZ / tp->timer_offset) * 2);
9707
9708                 init_timer(&tp->timer);
9709                 tp->timer.expires = jiffies + tp->timer_offset;
9710                 tp->timer.data = (unsigned long) tp;
9711                 tp->timer.function = tg3_timer;
9712         }
9713
9714         tg3_full_unlock(tp);
9715
9716         if (err)
9717                 goto err_out3;
9718
9719         if (tg3_flag(tp, USING_MSI)) {
9720                 err = tg3_test_msi(tp);
9721
9722                 if (err) {
9723                         tg3_full_lock(tp, 0);
9724                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9725                         tg3_free_rings(tp);
9726                         tg3_full_unlock(tp);
9727
9728                         goto err_out2;
9729                 }
9730
9731                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9732                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9733
9734                         tw32(PCIE_TRANSACTION_CFG,
9735                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9736                 }
9737         }
9738
9739         tg3_phy_start(tp);
9740
9741         tg3_full_lock(tp, 0);
9742
9743         add_timer(&tp->timer);
9744         tg3_flag_set(tp, INIT_COMPLETE);
9745         tg3_enable_ints(tp);
9746
9747         tg3_full_unlock(tp);
9748
9749         netif_tx_start_all_queues(dev);
9750
9751         /*
9752          * Reset loopback feature if it was turned on while the device was down
9753          * make sure that it's installed properly now.
9754          */
9755         if (dev->features & NETIF_F_LOOPBACK)
9756                 tg3_set_loopback(dev, dev->features);
9757
9758         return 0;
9759
9760 err_out3:
9761         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9762                 struct tg3_napi *tnapi = &tp->napi[i];
9763                 free_irq(tnapi->irq_vec, tnapi);
9764         }
9765
9766 err_out2:
9767         tg3_napi_disable(tp);
9768         tg3_napi_fini(tp);
9769         tg3_free_consistent(tp);
9770
9771 err_out1:
9772         tg3_ints_fini(tp);
9773         tg3_frob_aux_power(tp, false);
9774         pci_set_power_state(tp->pdev, PCI_D3hot);
9775         return err;
9776 }
9777
9778 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9779                                                  struct rtnl_link_stats64 *);
9780 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9781
/* net_device_ops .ndo_stop: bring the interface down.
 *
 * Quiesces NAPI, the reset worker and the driver timer, halts the chip,
 * releases interrupt vectors, snapshots the statistics into the *_prev
 * copies (so counters survive while the device is down), frees DMA
 * memory and finally powers the device down.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	cancel_work_sync(&tp->reset_task);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	/* Capture the cumulative counters before the hw stats block is
	 * torn down by tg3_free_consistent() below.
	 */
	tg3_get_stats64(tp->dev, &tp->net_stats_prev);

	memcpy(&tp->estats_prev, tg3_get_estats(tp),
	       sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
9828
9829 static inline u64 get_stat64(tg3_stat64_t *val)
9830 {
9831        return ((u64)val->high << 32) | ((u64)val->low);
9832 }
9833
/* Return the cumulative count of received frames with bad CRC.
 *
 * On 5700/5701 with a copper PHY the counter lives in the PHY
 * (MII_TG3_RXR_COUNTERS) and is accumulated into tp->phy_crc_errors
 * under tp->lock; all other configurations read the MAC statistic
 * rx_fcs_errors directly from the hardware stats block.
 */
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Keep CRC counting enabled while reading the
			 * counter register.
			 */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		/* NOTE(review): the add-then-return pattern suggests the PHY
		 * counter clears on read — confirm against the PHY datasheet.
		 */
		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
9859
/* Add the live hardware counter for @member on top of the snapshot taken
 * at the last close, yielding a monotonically increasing total.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)

/* Refresh tp->estats from the DMA'd hardware statistics block and return
 * it.  While the device is down (no stats block mapped) the snapshot
 * saved at the last close is returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;

	/* Receive-side counters. */
	ESTAT_ADD(rx_octets);
	ESTAT_ADD(rx_fragments);
	ESTAT_ADD(rx_ucast_packets);
	ESTAT_ADD(rx_mcast_packets);
	ESTAT_ADD(rx_bcast_packets);
	ESTAT_ADD(rx_fcs_errors);
	ESTAT_ADD(rx_align_errors);
	ESTAT_ADD(rx_xon_pause_rcvd);
	ESTAT_ADD(rx_xoff_pause_rcvd);
	ESTAT_ADD(rx_mac_ctrl_rcvd);
	ESTAT_ADD(rx_xoff_entered);
	ESTAT_ADD(rx_frame_too_long_errors);
	ESTAT_ADD(rx_jabbers);
	ESTAT_ADD(rx_undersize_packets);
	ESTAT_ADD(rx_in_length_errors);
	ESTAT_ADD(rx_out_length_errors);
	ESTAT_ADD(rx_64_or_less_octet_packets);
	ESTAT_ADD(rx_65_to_127_octet_packets);
	ESTAT_ADD(rx_128_to_255_octet_packets);
	ESTAT_ADD(rx_256_to_511_octet_packets);
	ESTAT_ADD(rx_512_to_1023_octet_packets);
	ESTAT_ADD(rx_1024_to_1522_octet_packets);
	ESTAT_ADD(rx_1523_to_2047_octet_packets);
	ESTAT_ADD(rx_2048_to_4095_octet_packets);
	ESTAT_ADD(rx_4096_to_8191_octet_packets);
	ESTAT_ADD(rx_8192_to_9022_octet_packets);

	/* Transmit-side counters, including per-collision-count buckets. */
	ESTAT_ADD(tx_octets);
	ESTAT_ADD(tx_collisions);
	ESTAT_ADD(tx_xon_sent);
	ESTAT_ADD(tx_xoff_sent);
	ESTAT_ADD(tx_flow_control);
	ESTAT_ADD(tx_mac_errors);
	ESTAT_ADD(tx_single_collisions);
	ESTAT_ADD(tx_mult_collisions);
	ESTAT_ADD(tx_deferred);
	ESTAT_ADD(tx_excessive_collisions);
	ESTAT_ADD(tx_late_collisions);
	ESTAT_ADD(tx_collide_2times);
	ESTAT_ADD(tx_collide_3times);
	ESTAT_ADD(tx_collide_4times);
	ESTAT_ADD(tx_collide_5times);
	ESTAT_ADD(tx_collide_6times);
	ESTAT_ADD(tx_collide_7times);
	ESTAT_ADD(tx_collide_8times);
	ESTAT_ADD(tx_collide_9times);
	ESTAT_ADD(tx_collide_10times);
	ESTAT_ADD(tx_collide_11times);
	ESTAT_ADD(tx_collide_12times);
	ESTAT_ADD(tx_collide_13times);
	ESTAT_ADD(tx_collide_14times);
	ESTAT_ADD(tx_collide_15times);
	ESTAT_ADD(tx_ucast_packets);
	ESTAT_ADD(tx_mcast_packets);
	ESTAT_ADD(tx_bcast_packets);
	ESTAT_ADD(tx_carrier_sense_errors);
	ESTAT_ADD(tx_discards);
	ESTAT_ADD(tx_errors);

	/* Internal DMA/queue health counters. */
	ESTAT_ADD(dma_writeq_full);
	ESTAT_ADD(dma_write_prioq_full);
	ESTAT_ADD(rxbds_empty);
	ESTAT_ADD(rx_discards);
	ESTAT_ADD(rx_errors);
	ESTAT_ADD(rx_threshold_hit);

	ESTAT_ADD(dma_readq_full);
	ESTAT_ADD(dma_read_prioq_full);
	ESTAT_ADD(tx_comp_queue_full);

	/* Host/firmware interaction counters. */
	ESTAT_ADD(ring_set_send_prod_index);
	ESTAT_ADD(ring_status_update);
	ESTAT_ADD(nic_irqs);
	ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	ESTAT_ADD(mbuf_lwm_thresh_hit);

	return estats;
}
9953
/* net_device_ops .ndo_get_stats64: fill @stats with cumulative counters.
 *
 * Each value is the snapshot saved at the last close (net_stats_prev)
 * plus the corresponding live hardware counter(s).  While the device is
 * down (no hw stats block mapped) the snapshot alone is returned.
 */
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;

	/* The hardware counts unicast/multicast/broadcast separately. */
	stats->rx_packets = old_stats->rx_packets +
		get_stat64(&hw_stats->rx_ucast_packets) +
		get_stat64(&hw_stats->rx_mcast_packets) +
		get_stat64(&hw_stats->rx_bcast_packets);

	stats->tx_packets = old_stats->tx_packets +
		get_stat64(&hw_stats->tx_ucast_packets) +
		get_stat64(&hw_stats->tx_mcast_packets) +
		get_stat64(&hw_stats->tx_bcast_packets);

	stats->rx_bytes = old_stats->rx_bytes +
		get_stat64(&hw_stats->rx_octets);
	stats->tx_bytes = old_stats->tx_bytes +
		get_stat64(&hw_stats->tx_octets);

	stats->rx_errors = old_stats->rx_errors +
		get_stat64(&hw_stats->rx_errors);
	stats->tx_errors = old_stats->tx_errors +
		get_stat64(&hw_stats->tx_errors) +
		get_stat64(&hw_stats->tx_mac_errors) +
		get_stat64(&hw_stats->tx_carrier_sense_errors) +
		get_stat64(&hw_stats->tx_discards);

	stats->multicast = old_stats->multicast +
		get_stat64(&hw_stats->rx_mcast_packets);
	stats->collisions = old_stats->collisions +
		get_stat64(&hw_stats->tx_collisions);

	stats->rx_length_errors = old_stats->rx_length_errors +
		get_stat64(&hw_stats->rx_frame_too_long_errors) +
		get_stat64(&hw_stats->rx_undersize_packets);

	stats->rx_over_errors = old_stats->rx_over_errors +
		get_stat64(&hw_stats->rxbds_empty);
	stats->rx_frame_errors = old_stats->rx_frame_errors +
		get_stat64(&hw_stats->rx_align_errors);
	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
		get_stat64(&hw_stats->tx_discards);
	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
		get_stat64(&hw_stats->tx_carrier_sense_errors);

	/* CRC errors may come from the PHY on some chips; see
	 * calc_crc_errors().
	 */
	stats->rx_crc_errors = old_stats->rx_crc_errors +
		calc_crc_errors(tp);

	stats->rx_missed_errors = old_stats->rx_missed_errors +
		get_stat64(&hw_stats->rx_discards);

	/* Software-side drop counter maintained by the driver itself. */
	stats->rx_dropped = tp->rx_dropped;

	return stats;
}
10015
10016 static inline u32 calc_crc(unsigned char *buf, int len)
10017 {
10018         u32 reg;
10019         u32 tmp;
10020         int j, k;
10021
10022         reg = 0xffffffff;
10023
10024         for (j = 0; j < len; j++) {
10025                 reg ^= buf[j];
10026
10027                 for (k = 0; k < 8; k++) {
10028                         tmp = reg & 0x01;
10029
10030                         reg >>= 1;
10031
10032                         if (tmp)
10033                                 reg ^= 0xedb88320;
10034                 }
10035         }
10036
10037         return ~reg;
10038 }
10039
10040 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10041 {
10042         /* accept or reject all multicast frames */
10043         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10044         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10045         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10046         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10047 }
10048
/* Program the MAC receive filters (promiscuous bit, VLAN tag handling
 * and the 128-bit multicast hash) from dev->flags and the device's
 * multicast list.  Caller must hold the driver lock (tg3_full_lock).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			/* Hash on the low 7 bits of the inverted CRC:
			 * bits 6:5 pick one of the four hash registers,
			 * bits 4:0 the bit within it.
			 */
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch (and flush) the mode register when it changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
10102
/* net_device_ops .ndo_set_rx_mode: apply the current rx filter settings
 * under the driver lock.  A downed interface is left untouched — the
 * mode is (re)programmed when the device is brought up.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
10114
/* ethtool .get_regs_len: the register dump is always a fixed-size block,
 * independent of the device variant.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REG_BLK_SIZE;
}
10119
/* ethtool .get_regs: dump the legacy register block into @_p
 * (TG3_REG_BLK_SIZE bytes).  The buffer is pre-zeroed; if the chip is
 * in its low-power state the dump is skipped and all-zeroes returned.
 */
static void tg3_get_regs(struct net_device *dev,
		struct ethtool_regs *regs, void *_p)
{
	struct tg3 *tp = netdev_priv(dev);

	regs->version = 0;

	memset(_p, 0, TG3_REG_BLK_SIZE);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;

	tg3_full_lock(tp, 0);

	tg3_dump_legacy_regs(tp, (u32 *)_p);

	tg3_full_unlock(tp);
}
10138
10139 static int tg3_get_eeprom_len(struct net_device *dev)
10140 {
10141         struct tg3 *tp = netdev_priv(dev);
10142
10143         return tp->nvram_size;
10144 }
10145
/* ethtool .get_eeprom: copy @eeprom->len bytes starting at @eeprom->offset
 * from NVRAM into @data.
 *
 * NVRAM is read in aligned 4-byte big-endian words, so an unaligned head
 * fragment, the aligned middle, and an unaligned tail fragment are each
 * handled separately.  eeprom->len is updated as bytes are copied, so on
 * a mid-transfer read failure it reflects the bytes actually delivered.
 * Returns 0, -EINVAL if there is no NVRAM, -EAGAIN while the chip is in
 * its low-power state, or a read error from tg3_nvram_read_be32().
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u8  *pd;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;

	if (tg3_flag(tp, NO_NVRAM))
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;	/* incremented below as bytes are copied */

	eeprom->magic = TG3_EEPROM_MAGIC;

	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		/* Copy only the requested bytes out of the aligned word. */
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}

	/* read bytes up to the last 4 byte boundary */
	pd = &data[eeprom->len];
	for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			/* Report the bytes successfully copied so far. */
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;

	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}
	return 0;
}
10208
10209 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10210
/* ethtool set_eeprom handler: write eeprom->len bytes from 'data' to NVRAM
 * at eeprom->offset.  NVRAM writes are word-based, so when the request is
 * not 4-byte aligned at either end, the bordering words are read first and
 * merged with the caller's data in a temporary bounce buffer.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if (tg3_flag(tp, NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;

	if ((b_offset = (offset & 3))) {
		/* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}

	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		/* Order matters: lay down the preserved border words first,
		 * then let the caller's data overwrite the overlapping bytes
		 * in the middle of those words.
		 */
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}

	ret = tg3_nvram_write_block(tp, offset, len, buf);

	/* Free the bounce buffer only if one was allocated. */
	if (buf != data)
		kfree(buf);

	return ret;
}
10269
/* ethtool get_settings handler: report supported/advertised link modes,
 * current speed/duplex and autoneg state.  When phylib manages the PHY,
 * the query is delegated to phy_ethtool_gset().
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}

	cmd->supported = (SUPPORTED_Autoneg);

	/* Gigabit modes are supported unless the PHY is 10/100-only. */
	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		cmd->supported |= (SUPPORTED_1000baseT_Half |
				   SUPPORTED_1000baseT_Full);

	/* Copper PHYs add the 10/100 twisted-pair modes; serdes is fibre. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
		cmd->supported |= (SUPPORTED_100baseT_Half |
				  SUPPORTED_100baseT_Full |
				  SUPPORTED_10baseT_Half |
				  SUPPORTED_10baseT_Full |
				  SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}

	/* Fold the configured flow-control mode into the advertised bits. */
	cmd->advertising = tp->link_config.advertising;
	if (tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
				cmd->advertising |= ADVERTISED_Pause;
			} else {
				cmd->advertising |= ADVERTISED_Pause |
						    ADVERTISED_Asym_Pause;
			}
		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
			cmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}
	/* Speed/duplex are only meaningful while the interface is up. */
	if (netif_running(dev)) {
		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
		cmd->duplex = tp->link_config.active_duplex;
	} else {
		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
		cmd->duplex = DUPLEX_INVALID;
	}
	cmd->phy_address = tp->phy_addr;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
10327
/* ethtool set_settings handler: validate and apply a new link configuration
 * (autoneg with an advertised-mode set, or a forced speed/duplex).
 * Delegates to phy_ethtool_sset() when phylib manages the PHY.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 speed = ethtool_cmd_speed(cmd);

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}

	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;

	/* A forced link requires an explicit duplex setting. */
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		/* Build the mask of modes this PHY can legally advertise. */
		u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;

		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			mask |= ADVERTISED_1000baseT_Half |
				ADVERTISED_1000baseT_Full;

		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
			mask |= ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;

		/* Reject any advertised mode outside the legal set. */
		if (cmd->advertising & ~mask)
			return -EINVAL;

		/* Keep only the speed/duplex bits for the stored config. */
		mask &= (ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_100baseT_Half |
			 ADVERTISED_100baseT_Full |
			 ADVERTISED_10baseT_Half |
			 ADVERTISED_10baseT_Full);

		cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			/* Serdes links only support forced 1000/full. */
			if (speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (speed != SPEED_100 &&
			    speed != SPEED_10)
				return -EINVAL;
		}
	}

	tg3_full_lock(tp, 0);

	tp->link_config.autoneg = cmd->autoneg;
	if (cmd->autoneg == AUTONEG_ENABLE) {
		tp->link_config.advertising = (cmd->advertising |
					      ADVERTISED_Autoneg);
		tp->link_config.speed = SPEED_INVALID;
		tp->link_config.duplex = DUPLEX_INVALID;
	} else {
		tp->link_config.advertising = 0;
		tp->link_config.speed = speed;
		tp->link_config.duplex = cmd->duplex;
	}

	/* Remember the requested settings so they survive chip resets. */
	tp->link_config.orig_speed = tp->link_config.speed;
	tp->link_config.orig_duplex = tp->link_config.duplex;
	tp->link_config.orig_autoneg = tp->link_config.autoneg;

	if (netif_running(dev))
		tg3_setup_phy(tp, 1);

	tg3_full_unlock(tp);

	return 0;
}
10418
10419 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10420 {
10421         struct tg3 *tp = netdev_priv(dev);
10422
10423         strcpy(info->driver, DRV_MODULE_NAME);
10424         strcpy(info->version, DRV_MODULE_VERSION);
10425         strcpy(info->fw_version, tp->fw_ver);
10426         strcpy(info->bus_info, pci_name(tp->pdev));
10427 }
10428
10429 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10430 {
10431         struct tg3 *tp = netdev_priv(dev);
10432
10433         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10434                 wol->supported = WAKE_MAGIC;
10435         else
10436                 wol->supported = 0;
10437         wol->wolopts = 0;
10438         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10439                 wol->wolopts = WAKE_MAGIC;
10440         memset(&wol->sopass, 0, sizeof(wol->sopass));
10441 }
10442
10443 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10444 {
10445         struct tg3 *tp = netdev_priv(dev);
10446         struct device *dp = &tp->pdev->dev;
10447
10448         if (wol->wolopts & ~WAKE_MAGIC)
10449                 return -EINVAL;
10450         if ((wol->wolopts & WAKE_MAGIC) &&
10451             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10452                 return -EINVAL;
10453
10454         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10455
10456         spin_lock_bh(&tp->lock);
10457         if (device_may_wakeup(dp))
10458                 tg3_flag_set(tp, WOL_ENABLE);
10459         else
10460                 tg3_flag_clear(tp, WOL_ENABLE);
10461         spin_unlock_bh(&tp->lock);
10462
10463         return 0;
10464 }
10465
/* ethtool get_msglevel handler: return the netif message-enable bitmap. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	return tp->msg_enable;
}
10471
/* ethtool set_msglevel handler: set the netif message-enable bitmap. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);
	tp->msg_enable = value;
}
10477
/* ethtool nway_reset handler: restart link autonegotiation.  Returns 0 on
 * success, -EAGAIN if the device is down or the PHY is not attached, and
 * -EINVAL for serdes PHYs or when autoneg is not enabled.
 */
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;

	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;

	if (tg3_flag(tp, USE_PHYLIB)) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		/* NOTE(review): BMCR is read twice and the first result is
		 * discarded - presumably a deliberate dummy read to flush a
		 * stale/latched value; confirm before simplifying.
		 */
		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
		    ((bmcr & BMCR_ANENABLE) ||
		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			/* Kick off a new autonegotiation cycle. */
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
10511
10512 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10513 {
10514         struct tg3 *tp = netdev_priv(dev);
10515
10516         ering->rx_max_pending = tp->rx_std_ring_mask;
10517         ering->rx_mini_max_pending = 0;
10518         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10519                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10520         else
10521                 ering->rx_jumbo_max_pending = 0;
10522
10523         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10524
10525         ering->rx_pending = tp->rx_pending;
10526         ering->rx_mini_pending = 0;
10527         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10528                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10529         else
10530                 ering->rx_jumbo_pending = 0;
10531
10532         ering->tx_pending = tp->napi[0].tx_pending;
10533 }
10534
/* ethtool set_ringparam handler: resize the RX/jumbo/TX rings.  The device
 * is quiesced, the new sizes are recorded, then the hardware is halted and
 * restarted so the rings are reallocated at the new sizes.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;

	/* TX must leave room for a maximally fragmented skb; TSO_BUG chips
	 * need extra headroom (3x) on top of that.
	 */
	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
	    (tg3_flag(tp, TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;

	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}

	tg3_full_lock(tp, irq_sync);

	tp->rx_pending = ering->rx_pending;

	/* Some chips cannot handle more than 64 standard RX descriptors. */
	if (tg3_flag(tp, MAX_RXPEND_64) &&
	    tp->rx_pending > 63)
		tp->rx_pending = 63;
	tp->rx_jumbo_pending = ering->rx_jumbo_pending;

	/* Apply the TX ring size to every NAPI/TX queue. */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].tx_pending = ering->tx_pending;

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	/* Restart the PHY only if we stopped it and the HW came back up. */
	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
10580
10581 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10582 {
10583         struct tg3 *tp = netdev_priv(dev);
10584
10585         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10586
10587         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10588                 epause->rx_pause = 1;
10589         else
10590                 epause->rx_pause = 0;
10591
10592         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10593                 epause->tx_pause = 1;
10594         else
10595                 epause->tx_pause = 0;
10596 }
10597
/* ethtool set_pauseparam handler: apply RX/TX flow-control settings.
 * With phylib, the pause advertisement bits are updated and the link is
 * renegotiated; otherwise the device is halted and restarted with the new
 * flow-control flags.  Returns 0 or a negative errno.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric rx/tx pause needs Asym_Pause PHY support. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Translate rx/tx pause into MII advertisement bits. */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not attached yet: stash the advertisement so
			 * it is applied when the PHY is connected.
			 */
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		/* Restart the hardware so the new settings take effect. */
		if (netif_running(dev)) {
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
10699
10700 static int tg3_get_sset_count(struct net_device *dev, int sset)
10701 {
10702         switch (sset) {
10703         case ETH_SS_TEST:
10704                 return TG3_NUM_TEST;
10705         case ETH_SS_STATS:
10706                 return TG3_NUM_STATS;
10707         default:
10708                 return -EOPNOTSUPP;
10709         }
10710 }
10711
10712 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10713 {
10714         switch (stringset) {
10715         case ETH_SS_STATS:
10716                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10717                 break;
10718         case ETH_SS_TEST:
10719                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10720                 break;
10721         default:
10722                 WARN_ON(1);     /* we need a WARN() */
10723                 break;
10724         }
10725 }
10726
/* ethtool set_phys_id handler: blink the port LEDs so the user can
 * physically identify the adapter.  Returning 1 from ETHTOOL_ID_ACTIVE
 * asks the ethtool core to drive a 1 Hz on/off cycle.
 */
static int tg3_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Override the hardware and force all LEDs on. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		/* Override the hardware and force the LEDs off. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore normal hardware-driven LED behavior. */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
10761
/* ethtool get_ethtool_stats handler: refresh the cumulative statistics
 * via tg3_get_estats() and copy them into the caller's u64 array.
 */
static void tg3_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);
	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
10768
/* Read the adapter's VPD (Vital Product Data) block into a freshly
 * kmalloc'd buffer.  Looks for an extended-VPD NVRAM directory entry
 * first, falling back to the fixed VPD region.  On success returns the
 * buffer (caller must kfree it) and stores its length in *vpdlen;
 * returns NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length field is in 4-byte words. */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended VPD - fall back to the default VPD region. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* Non-EEPROM NVRAM: read VPD through PCI config space,
		 * retrying up to 3 times on timed-out/interrupted reads.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
10844
10845 #define NVRAM_TEST_SIZE 0x100
10846 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10847 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10848 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10849 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10850 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10851 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10852 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10853 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10854
10855 static int tg3_test_nvram(struct tg3 *tp)
10856 {
10857         u32 csum, magic, len;
10858         __be32 *buf;
10859         int i, j, k, err = 0, size;
10860
10861         if (tg3_flag(tp, NO_NVRAM))
10862                 return 0;
10863
10864         if (tg3_nvram_read(tp, 0, &magic) != 0)
10865                 return -EIO;
10866
10867         if (magic == TG3_EEPROM_MAGIC)
10868                 size = NVRAM_TEST_SIZE;
10869         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10870                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10871                     TG3_EEPROM_SB_FORMAT_1) {
10872                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10873                         case TG3_EEPROM_SB_REVISION_0:
10874                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10875                                 break;
10876                         case TG3_EEPROM_SB_REVISION_2:
10877                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10878                                 break;
10879                         case TG3_EEPROM_SB_REVISION_3:
10880                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10881                                 break;
10882                         case TG3_EEPROM_SB_REVISION_4:
10883                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10884                                 break;
10885                         case TG3_EEPROM_SB_REVISION_5:
10886                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10887                                 break;
10888                         case TG3_EEPROM_SB_REVISION_6:
10889                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10890                                 break;
10891                         default:
10892                                 return -EIO;
10893                         }
10894                 } else
10895                         return 0;
10896         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10897                 size = NVRAM_SELFBOOT_HW_SIZE;
10898         else
10899                 return -EIO;
10900
10901         buf = kmalloc(size, GFP_KERNEL);
10902         if (buf == NULL)
10903                 return -ENOMEM;
10904
10905         err = -EIO;
10906         for (i = 0, j = 0; i < size; i += 4, j++) {
10907                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10908                 if (err)
10909                         break;
10910         }
10911         if (i < size)
10912                 goto out;
10913
10914         /* Selfboot format */
10915         magic = be32_to_cpu(buf[0]);
10916         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10917             TG3_EEPROM_MAGIC_FW) {
10918                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10919
10920                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10921                     TG3_EEPROM_SB_REVISION_2) {
10922                         /* For rev 2, the csum doesn't include the MBA. */
10923                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10924                                 csum8 += buf8[i];
10925                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10926                                 csum8 += buf8[i];
10927                 } else {
10928                         for (i = 0; i < size; i++)
10929                                 csum8 += buf8[i];
10930                 }
10931
10932                 if (csum8 == 0) {
10933                         err = 0;
10934                         goto out;
10935                 }
10936
10937                 err = -EIO;
10938                 goto out;
10939         }
10940
10941         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10942             TG3_EEPROM_MAGIC_HW) {
10943                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10944                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10945                 u8 *buf8 = (u8 *) buf;
10946
10947                 /* Separate the parity bits and the data bytes.  */
10948                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10949                         if ((i == 0) || (i == 8)) {
10950                                 int l;
10951                                 u8 msk;
10952
10953                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10954                                         parity[k++] = buf8[i] & msk;
10955                                 i++;
10956                         } else if (i == 16) {
10957                                 int l;
10958                                 u8 msk;
10959
10960                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10961                                         parity[k++] = buf8[i] & msk;
10962                                 i++;
10963
10964                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10965                                         parity[k++] = buf8[i] & msk;
10966                                 i++;
10967                         }
10968                         data[j++] = buf8[i];
10969                 }
10970
10971                 err = -EIO;
10972                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10973                         u8 hw8 = hweight8(data[i]);
10974
10975                         if ((hw8 & 0x1) && parity[i])
10976                                 goto out;
10977                         else if (!(hw8 & 0x1) && !parity[i])
10978                                 goto out;
10979                 }
10980                 err = 0;
10981                 goto out;
10982         }
10983
10984         err = -EIO;
10985
10986         /* Bootstrap checksum at offset 0x10 */
10987         csum = calc_crc((unsigned char *) buf, 0x10);
10988         if (csum != le32_to_cpu(buf[0x10/4]))
10989                 goto out;
10990
10991         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10992         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10993         if (csum != le32_to_cpu(buf[0xfc/4]))
10994                 goto out;
10995
10996         kfree(buf);
10997
10998         buf = tg3_vpd_readblock(tp, &len);
10999         if (!buf)
11000                 return -ENOMEM;
11001
11002         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11003         if (i > 0) {
11004                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11005                 if (j < 0)
11006                         goto out;
11007
11008                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11009                         goto out;
11010
11011                 i += PCI_VPD_LRDT_TAG_SIZE;
11012                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11013                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11014                 if (j > 0) {
11015                         u8 csum8 = 0;
11016
11017                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11018
11019                         for (i = 0; i <= j; i++)
11020                                 csum8 += ((u8 *)buf)[i];
11021
11022                         if (csum8)
11023                                 goto out;
11024                 }
11025         }
11026
11027         err = 0;
11028
11029 out:
11030         kfree(buf);
11031         return err;
11032 }
11033
11034 #define TG3_SERDES_TIMEOUT_SEC  2
11035 #define TG3_COPPER_TIMEOUT_SEC  6
11036
11037 static int tg3_test_link(struct tg3 *tp)
11038 {
11039         int i, max;
11040
11041         if (!netif_running(tp->dev))
11042                 return -ENODEV;
11043
11044         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11045                 max = TG3_SERDES_TIMEOUT_SEC;
11046         else
11047                 max = TG3_COPPER_TIMEOUT_SEC;
11048
11049         for (i = 0; i < max; i++) {
11050                 if (netif_carrier_ok(tp->dev))
11051                         return 0;
11052
11053                 if (msleep_interruptible(1000))
11054                         break;
11055         }
11056
11057         return -EIO;
11058 }
11059
/* Only test the commonly used registers.
 *
 * For every reg_tbl entry applicable to this ASIC, save the register,
 * write all-zeros and then (read_mask | write_mask), checking after
 * each write that the read-only bits (read_mask) kept their saved
 * value and that the read/write bits (write_mask) took the written
 * value.  The saved contents are restored after each register, and
 * also on the failure path.  Returns 0 on success, -EIO on the first
 * mismatching register.
 */
static int tg3_test_registers(struct tg3 *tp)
{
        int i, is_5705, is_5750;
        u32 offset, read_mask, write_mask, val, save_val, read_val;
        static struct {
                u16 offset;
                u16 flags;      /* chip-family applicability of the entry */
#define TG3_FL_5705     0x1     /* only applies to 5705-class parts */
#define TG3_FL_NOT_5705 0x2     /* skip on 5705-class parts */
#define TG3_FL_NOT_5788 0x4     /* skip on 5788 */
#define TG3_FL_NOT_5750 0x8     /* skip on 5750-class parts */
                u32 read_mask;  /* read-only bits that must survive writes */
                u32 write_mask; /* read/write bits that must accept writes */
        } reg_tbl[] = {
                /* MAC Control Registers */
                { MAC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00ef6f8c },
                { MAC_MODE, TG3_FL_5705,
                        0x00000000, 0x01ef6b8c },
                { MAC_STATUS, TG3_FL_NOT_5705,
                        0x03800107, 0x00000000 },
                { MAC_STATUS, TG3_FL_5705,
                        0x03800100, 0x00000000 },
                { MAC_ADDR_0_HIGH, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_ADDR_0_LOW, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_RX_MTU_SIZE, 0x0000,
                        0x00000000, 0x0000ffff },
                { MAC_TX_MODE, 0x0000,
                        0x00000000, 0x00000070 },
                { MAC_TX_LENGTHS, 0x0000,
                        0x00000000, 0x00003fff },
                { MAC_RX_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x000007fc },
                { MAC_RX_MODE, TG3_FL_5705,
                        0x00000000, 0x000007dc },
                { MAC_HASH_REG_0, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_1, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_2, 0x0000,
                        0x00000000, 0xffffffff },
                { MAC_HASH_REG_3, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive Data and Receive BD Initiator Control Registers. */
                { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
                        0x00000000, 0x00000003 },
                { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+0, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+4, 0x0000,
                        0x00000000, 0xffffffff },
                { RCVDBDI_STD_BD+8, 0x0000,
                        0x00000000, 0xffff0002 },
                { RCVDBDI_STD_BD+0xc, 0x0000,
                        0x00000000, 0xffffffff },

                /* Receive BD Initiator Control Registers. */
                { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { RCVBDI_STD_THRESH, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },

                /* Host Coalescing Control Registers. */
                { HOSTCC_MODE, TG3_FL_NOT_5705,
                        0x00000000, 0x00000004 },
                { HOSTCC_MODE, TG3_FL_5705,
                        0x00000000, 0x000000f6 },
                { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
                        0x00000000, 0x000003ff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
                        0x00000000, 0x000000ff },
                { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
                        0x00000000, 0xffffffff },
                { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },
                { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
                        0xffffffff, 0x00000000 },

                /* Buffer Manager Control Registers. */
                { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
                        0x00000000, 0x007fff80 },
                { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
                        0x00000000, 0x007fffff },
                { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
                        0x00000000, 0x0000003f },
                { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_MB_HIGH_WATER, 0x0000,
                        0x00000000, 0x000001ff },
                { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },
                { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
                        0xffffffff, 0x00000000 },

                /* Mailbox Registers */
                { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
                        0x00000000, 0x000001ff },
                { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
                        0x00000000, 0x000007ff },
                { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
                        0x00000000, 0x000001ff },

                /* Sentinel: offset 0xffff terminates the table. */
                { 0xffff, 0x0000, 0x00000000, 0x00000000 },
        };

        is_5705 = is_5750 = 0;
        if (tg3_flag(tp, 5705_PLUS)) {
                is_5705 = 1;
                if (tg3_flag(tp, 5750_PLUS))
                        is_5750 = 1;
        }

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                /* Skip entries that do not apply to this chip family. */
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if (tg3_flag(tp, IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                /* Restore the register before moving on. */
                tw32(offset, save_val);
        }

        return 0;

out:
        if (netif_msg_hw(tp))
                netdev_err(tp->dev,
                           "Register test failed at offset %x\n", offset);
        /* Restore the register clobbered by the failed test. */
        tw32(offset, save_val);
        return -EIO;
}
11280
11281 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11282 {
11283         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11284         int i;
11285         u32 j;
11286
11287         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11288                 for (j = 0; j < len; j += 4) {
11289                         u32 val;
11290
11291                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11292                         tg3_read_mem(tp, offset + j, &val);
11293                         if (val != test_pattern[i])
11294                                 return -EIO;
11295                 }
11296         }
11297         return 0;
11298 }
11299
/* Pattern-test the NIC's internal memory.
 *
 * Selects the { offset, len } region table matching this ASIC (each
 * table is terminated by an offset of 0xffffffff) and runs
 * tg3_do_mem_test() on every region.  Returns 0 on success or the
 * first non-zero error from tg3_do_mem_test().
 */
static int tg3_test_memory(struct tg3 *tp)
{
        static struct mem_entry {
                u32 offset;     /* start address in NIC-local memory */
                u32 len;        /* region length in bytes */
        } mem_tbl_570x[] = {
                { 0x00000000, 0x00b50},
                { 0x00002000, 0x1c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5705[] = {
                { 0x00000100, 0x0000c},
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x01000},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0e000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5755[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x00800},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5906[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00400},
                { 0x00006000, 0x00400},
                { 0x00008000, 0x01000},
                { 0x00010000, 0x01000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5717[] = {
                { 0x00000200, 0x00008},
                { 0x00010000, 0x0a000},
                { 0x00020000, 0x13c00},
                { 0xffffffff, 0x00000}
        }, mem_tbl_57765[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x09800},
                { 0x00010000, 0x0a000},
                { 0xffffffff, 0x00000}
        };
        struct mem_entry *mem_tbl;
        int err = 0;
        int i;

        /* Most specific chip family is checked first; the order of
         * these tests matters.
         */
        if (tg3_flag(tp, 5717_PLUS))
                mem_tbl = mem_tbl_5717;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                mem_tbl = mem_tbl_57765;
        else if (tg3_flag(tp, 5755_PLUS))
                mem_tbl = mem_tbl_5755;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                mem_tbl = mem_tbl_5906;
        else if (tg3_flag(tp, 5705_PLUS))
                mem_tbl = mem_tbl_5705;
        else
                mem_tbl = mem_tbl_570x;

        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
                err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
                if (err)
                        break;
        }

        return err;
}
11368
/* MSS used for frames built by the TSO loopback test. */
#define TG3_TSO_MSS             500

/* Header sizes (bytes) of the canned TSO test frame below. */
#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

/* Canned ethertype + IPv4 + TCP header (including 12 bytes of TCP
 * options) used as the template for the TSO loopback test frame.
 * tg3_run_loopback() copies it in right after the two MAC addresses
 * and then patches the IP total-length field (and, for HW TSO parts,
 * zeroes the TCP checksum).  Byte annotations below follow the
 * standard IPv4/TCP layouts implied by the iphdr/tcphdr casts in
 * tg3_run_loopback().
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,                     /* ethertype: IPv4 (0x0800) */
0x45, 0x00, 0x00, 0x00,         /* IP: ver 4/IHL 5, TOS, tot_len (patched) */
0x00, 0x00, 0x40, 0x00,         /* IP: id, flags/frag offset (DF set) */
0x40, 0x06, 0x00, 0x00,         /* IP: TTL 64, proto 6 (TCP), csum 0 */
0x0a, 0x00, 0x00, 0x01,         /* IP: source 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,         /* IP: dest   10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,         /* TCP: source / dest ports */
0x00, 0x00, 0x01, 0x00,         /* TCP: sequence number */
0x00, 0x00, 0x02, 0x00,         /* TCP: ack number */
0x80, 0x10, 0x10, 0x00,         /* TCP: data off 8 (32-byte hdr), flags, window */
0x14, 0x09, 0x00, 0x00,         /* TCP: checksum, urgent pointer */
0x01, 0x01, 0x08, 0x0a,         /* TCP opts: NOP, NOP, timestamp kind/len */
0x11, 0x11, 0x11, 0x11,         /* TCP opts: timestamp value (filler) */
0x11, 0x11, 0x11, 0x11,         /* TCP opts: timestamp echo (filler) */
};
11391
/* Send one test frame through the currently-configured loopback path
 * (set up by the caller: MAC, internal PHY, or external) and verify
 * that it comes back intact on the receive return ring.
 *
 * @pktsz:        transmit frame length in bytes.
 * @tso_loopback: if true, build a multi-segment TSO frame from the
 *                canned tg3_tso_header template instead of a plain
 *                Ethernet frame.
 *
 * Returns 0 if the expected number of packets arrived with the
 * expected payload, -ENOMEM if the skb allocation fails, and -EIO on
 * any mapping failure, timeout, or payload mismatch.
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
        u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
        u32 budget;
        struct sk_buff *skb, *rx_skb;
        u8 *tx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;
        struct tg3_napi *tnapi, *rnapi;
        struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

        /* With RSS/TSS enabled, the first rx/tx queue lives on
         * vector 1 rather than vector 0.
         */
        tnapi = &tp->napi[0];
        rnapi = &tp->napi[0];
        if (tp->irq_cnt > 1) {
                if (tg3_flag(tp, ENABLE_RSS))
                        rnapi = &tp->napi[1];
                if (tg3_flag(tp, ENABLE_TSS))
                        tnapi = &tp->napi[1];
        }
        coal_now = tnapi->coal_now | rnapi->coal_now;

        err = -EIO;

        tx_len = pktsz;
        skb = netdev_alloc_skb(tp->dev, tx_len);
        if (!skb)
                return -ENOMEM;

        /* Destination MAC = our own address, then 8 zero bytes
         * (covering the source MAC and, for non-TSO, the ethertype).
         */
        tx_data = skb_put(skb, tx_len);
        memcpy(tx_data, tp->dev->dev_addr, 6);
        memset(tx_data + 6, 0x0, 8);

        tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

        if (tso_loopback) {
                struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

                u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
                              TG3_TSO_TCP_OPT_LEN;

                memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
                       sizeof(tg3_tso_header));
                mss = TG3_TSO_MSS;

                /* Number of segments the hardware should generate. */
                val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
                num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

                /* Set the total length field in the IP header */
                iph->tot_len = htons((u16)(mss + hdr_len));

                base_flags = (TXD_FLAG_CPU_PRE_DMA |
                              TXD_FLAG_CPU_POST_DMA);

                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
                        /* HW TSO engines expect a zeroed TCP checksum
                         * field as the seed.
                         */
                        struct tcphdr *th;
                        val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
                        th = (struct tcphdr *)&tx_data[val];
                        th->check = 0;
                } else
                        base_flags |= TXD_FLAG_TCPUDP_CSUM;

                /* Encode the header length into mss/base_flags in the
                 * format each TSO hardware generation expects.
                 */
                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                        if (hdr_len & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        mss |= (TG3_TSO_TCP_OPT_LEN << 9);
                } else {
                        base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
                }

                data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
        } else {
                num_pkts = 1;
                data_off = ETH_HLEN;
        }

        /* Fill the payload with a predictable byte pattern so the
         * receive side can verify it.
         */
        for (i = data_off; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);

        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        }

        val = tnapi->tx_prod;
        tnapi->tx_buffers[val].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

        /* Kick host coalescing so the status block is current before
         * we sample the rx producer index.
         */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               rnapi->coal_now);

        udelay(10);

        rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

        budget = tg3_tx_avail(tnapi);
        if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
                            base_flags | TXD_FLAG_END, mss, 0)) {
                tnapi->tx_buffers[val].skb = NULL;
                dev_kfree_skb(skb);
                return -EIO;
        }

        tnapi->tx_prod++;

        /* Ring the doorbell; the mailbox read flushes the write. */
        tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
        tr32_mailbox(tnapi->prodmbox);

        udelay(10);

        /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
        for (i = 0; i < 35; i++) {
                tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
                       coal_now);

                udelay(10);

                tx_idx = tnapi->hw_status->idx[0].tx_consumer;
                rx_idx = rnapi->hw_status->idx[0].rx_producer;
                if ((tx_idx == tnapi->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }

        tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
        dev_kfree_skb(skb);

        /* Transmit never completed? */
        if (tx_idx != tnapi->tx_prod)
                goto out;

        /* Not all expected packets arrived? */
        if (rx_idx != rx_start_idx + num_pkts)
                goto out;

        /* Verify each received packet.  val carries the expected
         * payload byte counter across segments.
         */
        val = data_off;
        while (rx_idx != rx_start_idx) {
                desc = &rnapi->rx_rcb[rx_start_idx++];
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

                /* Any rx error other than the odd-nibble MII one
                 * fails the test.
                 */
                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
                        goto out;

                rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
                         - ETH_FCS_LEN;

                if (!tso_loopback) {
                        if (rx_len != tx_len)
                                goto out;

                        /* The frame must have landed on the ring that
                         * matches its size.
                         */
                        if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
                                if (opaque_key != RXD_OPAQUE_RING_STD)
                                        goto out;
                        } else {
                                if (opaque_key != RXD_OPAQUE_RING_JUMBO)
                                        goto out;
                        }
                } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                           (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                            >> RXD_TCPCSUM_SHIFT != 0xffff) {
                        /* TSO segment with a bad hardware TCP csum. */
                        goto out;
                }

                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        rx_skb = tpr->rx_std_buffers[desc_idx].skb;
                        map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
                                             mapping);
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
                        map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
                                             mapping);
                } else
                        goto out;

                pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
                                            PCI_DMA_FROMDEVICE);

                /* Compare the payload against the transmit pattern. */
                for (i = data_off; i < rx_len; i++, val++) {
                        if (*(rx_skb->data + i) != (u8) (val & 0xff))
                                goto out;
                }
        }

        err = 0;

        /* tg3_free_rings will unmap and free the rx_skb */
out:
        return err;
}
11591
/* Per-mode failure bits accumulated into the loopback results. */
#define TG3_STD_LOOPBACK_FAILED         1
#define TG3_JMB_LOOPBACK_FAILED         2
#define TG3_TSO_LOOPBACK_FAILED         4
#define TG3_LOOPBACK_FAILED \
        (TG3_STD_LOOPBACK_FAILED | \
         TG3_JMB_LOOPBACK_FAILED | \
         TG3_TSO_LOOPBACK_FAILED)

/* Run the loopback self tests.
 *
 * data[0] accumulates TG3_*_LOOPBACK_FAILED bits for MAC loopback,
 * data[1] for internal PHY loopback, and data[2] (only when
 * @do_extlpbk is set) for external loopback.  The EEE capability
 * flag is cleared for the duration of the tests and restored at
 * "done".  Returns 0 if every test that ran passed, a negative errno
 * otherwise.
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
        int err = -EIO;
        u32 eee_cap;

        /* Temporarily mask off the EEE capability; restored below. */
        eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
        tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

        if (!netif_running(tp->dev)) {
                data[0] = TG3_LOOPBACK_FAILED;
                data[1] = TG3_LOOPBACK_FAILED;
                if (do_extlpbk)
                        data[2] = TG3_LOOPBACK_FAILED;
                goto done;
        }

        err = tg3_reset_hw(tp, 1);
        if (err) {
                /* Could not bring the hardware up; fail everything. */
                data[0] = TG3_LOOPBACK_FAILED;
                data[1] = TG3_LOOPBACK_FAILED;
                if (do_extlpbk)
                        data[2] = TG3_LOOPBACK_FAILED;
                goto done;
        }

        if (tg3_flag(tp, ENABLE_RSS)) {
                int i;

                /* Reroute all rx packets to the 1st queue */
                for (i = MAC_RSS_INDIR_TBL_0;
                     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
                        tw32(i, 0x0);
        }

        /* HW errata - mac loopback fails in some cases on 5780.
         * Normal traffic and PHY loopback are not affected by
         * errata.  Also, the MAC loopback test is deprecated for
         * all newer ASIC revisions.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT)) {
                tg3_mac_loopback(tp, true);

                if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                        data[0] |= TG3_STD_LOOPBACK_FAILED;

                if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
                        data[0] |= TG3_JMB_LOOPBACK_FAILED;

                tg3_mac_loopback(tp, false);
        }

        /* PHY loopback tests - skipped on SerDes parts and when
         * phylib manages the PHY.
         */
        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            !tg3_flag(tp, USE_PHYLIB)) {
                int i;

                tg3_phy_lpbk_set(tp, 0, false);

                /* Wait for link */
                for (i = 0; i < 100; i++) {
                        if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                                break;
                        mdelay(1);
                }

                if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                        data[1] |= TG3_STD_LOOPBACK_FAILED;
                if (tg3_flag(tp, TSO_CAPABLE) &&
                    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
                        data[1] |= TG3_TSO_LOOPBACK_FAILED;
                if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
                        data[1] |= TG3_JMB_LOOPBACK_FAILED;

                if (do_extlpbk) {
                        tg3_phy_lpbk_set(tp, 0, true);

                        /* All link indications report up, but the hardware
                         * isn't really ready for about 20 msec.  Double it
                         * to be sure.
                         */
                        mdelay(40);

                        if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                                data[2] |= TG3_STD_LOOPBACK_FAILED;
                        if (tg3_flag(tp, TSO_CAPABLE) &&
                            tg3_run_loopback(tp, ETH_FRAME_LEN, true))
                                data[2] |= TG3_TSO_LOOPBACK_FAILED;
                        if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                            tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
                                data[2] |= TG3_JMB_LOOPBACK_FAILED;
                }

                /* Re-enable gphy autopowerdown. */
                if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                        tg3_phy_toggle_apd(tp, true);
        }

        err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
        /* Restore the EEE capability flag saved on entry. */
        tp->phy_flags |= eee_cap;

        return err;
}
11706
11707 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11708                           u64 *data)
11709 {
11710         struct tg3 *tp = netdev_priv(dev);
11711         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11712
11713         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11714             tg3_power_up(tp)) {
11715                 etest->flags |= ETH_TEST_FL_FAILED;
11716                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11717                 return;
11718         }
11719
11720         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11721
11722         if (tg3_test_nvram(tp) != 0) {
11723                 etest->flags |= ETH_TEST_FL_FAILED;
11724                 data[0] = 1;
11725         }
11726         if (!doextlpbk && tg3_test_link(tp)) {
11727                 etest->flags |= ETH_TEST_FL_FAILED;
11728                 data[1] = 1;
11729         }
11730         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11731                 int err, err2 = 0, irq_sync = 0;
11732
11733                 if (netif_running(dev)) {
11734                         tg3_phy_stop(tp);
11735                         tg3_netif_stop(tp);
11736                         irq_sync = 1;
11737                 }
11738
11739                 tg3_full_lock(tp, irq_sync);
11740
11741                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11742                 err = tg3_nvram_lock(tp);
11743                 tg3_halt_cpu(tp, RX_CPU_BASE);
11744                 if (!tg3_flag(tp, 5705_PLUS))
11745                         tg3_halt_cpu(tp, TX_CPU_BASE);
11746                 if (!err)
11747                         tg3_nvram_unlock(tp);
11748
11749                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
11750                         tg3_phy_reset(tp);
11751
11752                 if (tg3_test_registers(tp) != 0) {
11753                         etest->flags |= ETH_TEST_FL_FAILED;
11754                         data[2] = 1;
11755                 }
11756
11757                 if (tg3_test_memory(tp) != 0) {
11758                         etest->flags |= ETH_TEST_FL_FAILED;
11759                         data[3] = 1;
11760                 }
11761
11762                 if (doextlpbk)
11763                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
11764
11765                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
11766                         etest->flags |= ETH_TEST_FL_FAILED;
11767
11768                 tg3_full_unlock(tp);
11769
11770                 if (tg3_test_interrupt(tp) != 0) {
11771                         etest->flags |= ETH_TEST_FL_FAILED;
11772                         data[7] = 1;
11773                 }
11774
11775                 tg3_full_lock(tp, 0);
11776
11777                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11778                 if (netif_running(dev)) {
11779                         tg3_flag_set(tp, INIT_COMPLETE);
11780                         err2 = tg3_restart_hw(tp, 1);
11781                         if (!err2)
11782                                 tg3_netif_start(tp);
11783                 }
11784
11785                 tg3_full_unlock(tp);
11786
11787                 if (irq_sync && !err2)
11788                         tg3_phy_start(tp);
11789         }
11790         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11791                 tg3_power_down(tp);
11792
11793 }
11794
11795 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11796 {
11797         struct mii_ioctl_data *data = if_mii(ifr);
11798         struct tg3 *tp = netdev_priv(dev);
11799         int err;
11800
11801         if (tg3_flag(tp, USE_PHYLIB)) {
11802                 struct phy_device *phydev;
11803                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11804                         return -EAGAIN;
11805                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11806                 return phy_mii_ioctl(phydev, ifr, cmd);
11807         }
11808
11809         switch (cmd) {
11810         case SIOCGMIIPHY:
11811                 data->phy_id = tp->phy_addr;
11812
11813                 /* fallthru */
11814         case SIOCGMIIREG: {
11815                 u32 mii_regval;
11816
11817                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11818                         break;                  /* We have no PHY */
11819
11820                 if (!netif_running(dev))
11821                         return -EAGAIN;
11822
11823                 spin_lock_bh(&tp->lock);
11824                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11825                 spin_unlock_bh(&tp->lock);
11826
11827                 data->val_out = mii_regval;
11828
11829                 return err;
11830         }
11831
11832         case SIOCSMIIREG:
11833                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11834                         break;                  /* We have no PHY */
11835
11836                 if (!netif_running(dev))
11837                         return -EAGAIN;
11838
11839                 spin_lock_bh(&tp->lock);
11840                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
11841                 spin_unlock_bh(&tp->lock);
11842
11843                 return err;
11844
11845         default:
11846                 /* do nothing */
11847                 break;
11848         }
11849         return -EOPNOTSUPP;
11850 }
11851
11852 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11853 {
11854         struct tg3 *tp = netdev_priv(dev);
11855
11856         memcpy(ec, &tp->coal, sizeof(*ec));
11857         return 0;
11858 }
11859
11860 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11861 {
11862         struct tg3 *tp = netdev_priv(dev);
11863         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11864         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11865
11866         if (!tg3_flag(tp, 5705_PLUS)) {
11867                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11868                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11869                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11870                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11871         }
11872
11873         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11874             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11875             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11876             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11877             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11878             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11879             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11880             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11881             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11882             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11883                 return -EINVAL;
11884
11885         /* No rx interrupts will be generated if both are zero */
11886         if ((ec->rx_coalesce_usecs == 0) &&
11887             (ec->rx_max_coalesced_frames == 0))
11888                 return -EINVAL;
11889
11890         /* No tx interrupts will be generated if both are zero */
11891         if ((ec->tx_coalesce_usecs == 0) &&
11892             (ec->tx_max_coalesced_frames == 0))
11893                 return -EINVAL;
11894
11895         /* Only copy relevant parameters, ignore all others. */
11896         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11897         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11898         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11899         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11900         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11901         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11902         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11903         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11904         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11905
11906         if (netif_running(dev)) {
11907                 tg3_full_lock(tp, 0);
11908                 __tg3_set_coalesce(tp, &tp->coal);
11909                 tg3_full_unlock(tp);
11910         }
11911         return 0;
11912 }
11913
/* ethtool operations supported by this driver; unset members are
 * simply unsupported.
 */
static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .set_phys_id            = tg3_set_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
};
11941
11942 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11943 {
11944         u32 cursize, val, magic;
11945
11946         tp->nvram_size = EEPROM_CHIP_SIZE;
11947
11948         if (tg3_nvram_read(tp, 0, &magic) != 0)
11949                 return;
11950
11951         if ((magic != TG3_EEPROM_MAGIC) &&
11952             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11953             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11954                 return;
11955
11956         /*
11957          * Size the chip by reading offsets at increasing powers of two.
11958          * When we encounter our validation signature, we know the addressing
11959          * has wrapped around, and thus have our chip size.
11960          */
11961         cursize = 0x10;
11962
11963         while (cursize < tp->nvram_size) {
11964                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11965                         return;
11966
11967                 if (val == magic)
11968                         break;
11969
11970                 cursize <<= 1;
11971         }
11972
11973         tp->nvram_size = cursize;
11974 }
11975
11976 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11977 {
11978         u32 val;
11979
11980         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
11981                 return;
11982
11983         /* Selfboot format */
11984         if (val != TG3_EEPROM_MAGIC) {
11985                 tg3_get_eeprom_size(tp);
11986                 return;
11987         }
11988
11989         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11990                 if (val != 0) {
11991                         /* This is confusing.  We want to operate on the
11992                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
11993                          * call will read from NVRAM and byteswap the data
11994                          * according to the byteswapping settings for all
11995                          * other register accesses.  This ensures the data we
11996                          * want will always reside in the lower 16-bits.
11997                          * However, the data in NVRAM is in LE format, which
11998                          * means the data from the NVRAM read will always be
11999                          * opposite the endianness of the CPU.  The 16-bit
12000                          * byteswap then brings the data to CPU endianness.
12001                          */
12002                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12003                         return;
12004                 }
12005         }
12006         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12007 }
12008
/* Decode the original (pre-5752) NVRAM_CFG1 layout: record whether a
 * flash interface is present, and fill in tp->nvram_jedecnum,
 * tp->nvram_pagesize and the NVRAM_BUFFERED flag from the vendor bits.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tg3_flag_set(tp, FLASH);
        } else {
                /* No flash interface: clear compat bypass in the config
                 * register so accesses go through the normal path.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
            tg3_flag(tp, 5780_CLASS)) {
                /* 5750/5780-class parts encode the vendor in
                 * NVRAM_CFG1_VENDOR_MASK; an unrecognized encoding
                 * leaves the fields untouched (no default case).
                 */
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_ATMEL_EEPROM:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ST:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_SAIFUN:
                        tp->nvram_jedecnum = JEDEC_SAIFUN;
                        tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_SST_SMALL:
                case FLASH_VENDOR_SST_LARGE:
                        tp->nvram_jedecnum = JEDEC_SST;
                        tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                        break;
                }
        } else {
                /* All other chips: assume a buffered Atmel AT45DB0X1B. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tg3_flag_set(tp, NVRAM_BUFFERED);
        }
}
12059
12060 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12061 {
12062         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12063         case FLASH_5752PAGE_SIZE_256:
12064                 tp->nvram_pagesize = 256;
12065                 break;
12066         case FLASH_5752PAGE_SIZE_512:
12067                 tp->nvram_pagesize = 512;
12068                 break;
12069         case FLASH_5752PAGE_SIZE_1K:
12070                 tp->nvram_pagesize = 1024;
12071                 break;
12072         case FLASH_5752PAGE_SIZE_2K:
12073                 tp->nvram_pagesize = 2048;
12074                 break;
12075         case FLASH_5752PAGE_SIZE_4K:
12076                 tp->nvram_pagesize = 4096;
12077                 break;
12078         case FLASH_5752PAGE_SIZE_264:
12079                 tp->nvram_pagesize = 264;
12080                 break;
12081         case FLASH_5752PAGE_SIZE_528:
12082                 tp->nvram_pagesize = 528;
12083                 break;
12084         }
12085 }
12086
/* Decode NVRAM_CFG1 for 5752-family parts: set TPM protection,
 * vendor/buffering/flash flags and page size.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tg3_flag_set(tp, PROTECTED_NVRAM);

        /* Unrecognized vendor codes fall through with no fields set. */
        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        }

        if (tg3_flag(tp, FLASH)) {
                /* Flash parts: page size comes from the config register. */
                tg3_nvram_get_pagesize(tp, nvcfg1);
        } else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}
12127
/* Decode NVRAM_CFG1 for 5755-family parts.  When TPM protection is
 * active (bit 27), the reported NVRAM size is reduced accordingly.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5755VENDOR_ATMEL_FLASH_1:
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
        case FLASH_5755VENDOR_ATMEL_FLASH_5:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                /* Size depends on the specific Atmel part and whether a
                 * protected region is carved out at the top.
                 */
                if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
                    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
                        tp->nvram_size = (protect ? 0x3e200 :
                                          TG3_NVRAM_SIZE_512KB);
                else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_128KB);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_128KB);
                else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_128KB :
                                          TG3_NVRAM_SIZE_512KB);
                break;
        }
}
12183
/* Decode NVRAM_CFG1 for 5787-family parts: vendor, buffering/flash
 * flags and page size.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                /* EEPROM: page size is the whole chip, and compat
                 * bypass must be cleared in the config register.
                 */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_5755VENDOR_ATMEL_FLASH_1:
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }
}
12221
/* Decode NVRAM_CFG1 for 5761-family parts.  When TPM protection is
 * active the usable size is read from the NVRAM_ADDR_LOCKOUT register;
 * otherwise it is derived from the vendor/part code.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5761VENDOR_ATMEL_ADB021D:
        case FLASH_5761VENDOR_ATMEL_ADB041D:
        case FLASH_5761VENDOR_ATMEL_ADB081D:
        case FLASH_5761VENDOR_ATMEL_ADB161D:
        case FLASH_5761VENDOR_ATMEL_MDB021D:
        case FLASH_5761VENDOR_ATMEL_MDB041D:
        case FLASH_5761VENDOR_ATMEL_MDB081D:
        case FLASH_5761VENDOR_ATMEL_MDB161D:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                /* Atmel parts here also skip address translation. */
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
                tp->nvram_pagesize = 256;
                break;
        case FLASH_5761VENDOR_ST_A_M45PE20:
        case FLASH_5761VENDOR_ST_A_M45PE40:
        case FLASH_5761VENDOR_ST_A_M45PE80:
        case FLASH_5761VENDOR_ST_A_M45PE16:
        case FLASH_5761VENDOR_ST_M_M45PE20:
        case FLASH_5761VENDOR_ST_M_M45PE40:
        case FLASH_5761VENDOR_ST_M_M45PE80:
        case FLASH_5761VENDOR_ST_M_M45PE16:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }

        if (protect) {
                /* Hardware reports the accessible size directly. */
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_size = TG3_NVRAM_SIZE_2MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                }
        }
}
12296
12297 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12298 {
12299         tp->nvram_jedecnum = JEDEC_ATMEL;
12300         tg3_flag_set(tp, NVRAM_BUFFERED);
12301         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12302 }
12303
/* Decode NVRAM_CFG1 for 57780-family parts: vendor, flags, size and
 * page size.  Unknown vendor codes mark the device as having no NVRAM.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                /* EEPROM path: clear compat bypass and skip the flash
                 * page-size probing below.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_57780VENDOR_ATMEL_AT45DB011D:
        case FLASH_57780VENDOR_ATMEL_AT45DB011B:
        case FLASH_57780VENDOR_ATMEL_AT45DB021D:
        case FLASH_57780VENDOR_ATMEL_AT45DB021B:
        case FLASH_57780VENDOR_ATMEL_AT45DB041D:
        case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Size depends on the specific Atmel part. */
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_57780VENDOR_ATMEL_AT45DB011D:
                case FLASH_57780VENDOR_ATMEL_AT45DB011B:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB021D:
                case FLASH_57780VENDOR_ATMEL_AT45DB021B:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB041D:
                case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ST_M45PE10:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* Only 264/528-byte pages use NVRAM address translation. */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12375
12376
/* Decode NVRAM_CFG1 for 5717-family parts: vendor, flags, size and
 * page size.  Unknown vendor codes mark the device as having no NVRAM.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5717VENDOR_ATMEL_EEPROM:
        case FLASH_5717VENDOR_MICRO_EEPROM:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                /* EEPROM path: clear compat bypass and skip the flash
                 * page-size probing below.
                 */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                return;
        case FLASH_5717VENDOR_ATMEL_MDB011D:
        case FLASH_5717VENDOR_ATMEL_ADB011B:
        case FLASH_5717VENDOR_ATMEL_ADB011D:
        case FLASH_5717VENDOR_ATMEL_MDB021D:
        case FLASH_5717VENDOR_ATMEL_ADB021B:
        case FLASH_5717VENDOR_ATMEL_ADB021D:
        case FLASH_5717VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ATMEL_MDB021D:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ATMEL_ADB021B:
                case FLASH_5717VENDOR_ATMEL_ADB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5717VENDOR_ST_M_M25PE10:
        case FLASH_5717VENDOR_ST_A_M25PE10:
        case FLASH_5717VENDOR_ST_M_M45PE10:
        case FLASH_5717VENDOR_ST_A_M45PE10:
        case FLASH_5717VENDOR_ST_M_M25PE20:
        case FLASH_5717VENDOR_ST_A_M25PE20:
        case FLASH_5717VENDOR_ST_M_M45PE20:
        case FLASH_5717VENDOR_ST_A_M45PE20:
        case FLASH_5717VENDOR_ST_25USPT:
        case FLASH_5717VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ST_M_M25PE20:
                case FLASH_5717VENDOR_ST_M_M45PE20:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ST_A_M25PE20:
                case FLASH_5717VENDOR_ST_A_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* Only 264/528-byte pages use NVRAM address translation. */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12454
/* Decode NVRAM geometry for 5720-class devices.
 *
 * Reads NVRAM_CFG1, extracts the vendor/part strapping bits and fills in
 * tp->nvram_jedecnum, tp->nvram_pagesize / tp->nvram_size and the
 * NVRAM-related tg3 flags.  Unrecognized straps mark the device NO_NVRAM.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	/* Part-selection straps share the 5752 vendor mask layout. */
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		/* Serial EEPROM part: buffered accesses, FLASH flag stays
		 * clear and the fixed chip size doubles as the page size.
		 */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		/* Turn off compatibility bypass so the EEPROM is addressed
		 * natively.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		/* EEPROM path skips the flash page-size probing below. */
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		/* Atmel flash parts; size follows from the part number. */
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		/* ST flash parts; size follows from the part number. */
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		/* Unknown strap: treat the device as having no NVRAM. */
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte page parts use NVRAM address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12566
12567 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12568 static void __devinit tg3_nvram_init(struct tg3 *tp)
12569 {
12570         tw32_f(GRC_EEPROM_ADDR,
12571              (EEPROM_ADDR_FSM_RESET |
12572               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12573                EEPROM_ADDR_CLKPERD_SHIFT)));
12574
12575         msleep(1);
12576
12577         /* Enable seeprom accesses. */
12578         tw32_f(GRC_LOCAL_CTRL,
12579              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12580         udelay(100);
12581
12582         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12583             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12584                 tg3_flag_set(tp, NVRAM);
12585
12586                 if (tg3_nvram_lock(tp)) {
12587                         netdev_warn(tp->dev,
12588                                     "Cannot get nvram lock, %s failed\n",
12589                                     __func__);
12590                         return;
12591                 }
12592                 tg3_enable_nvram_access(tp);
12593
12594                 tp->nvram_size = 0;
12595
12596                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12597                         tg3_get_5752_nvram_info(tp);
12598                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12599                         tg3_get_5755_nvram_info(tp);
12600                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12601                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12602                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12603                         tg3_get_5787_nvram_info(tp);
12604                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12605                         tg3_get_5761_nvram_info(tp);
12606                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12607                         tg3_get_5906_nvram_info(tp);
12608                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12609                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12610                         tg3_get_57780_nvram_info(tp);
12611                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12612                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12613                         tg3_get_5717_nvram_info(tp);
12614                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12615                         tg3_get_5720_nvram_info(tp);
12616                 else
12617                         tg3_get_nvram_info(tp);
12618
12619                 if (tp->nvram_size == 0)
12620                         tg3_get_nvram_size(tp);
12621
12622                 tg3_disable_nvram_access(tp);
12623                 tg3_nvram_unlock(tp);
12624
12625         } else {
12626                 tg3_flag_clear(tp, NVRAM);
12627                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12628
12629                 tg3_get_eeprom_size(tp);
12630         }
12631 }
12632
12633 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12634                                     u32 offset, u32 len, u8 *buf)
12635 {
12636         int i, j, rc = 0;
12637         u32 val;
12638
12639         for (i = 0; i < len; i += 4) {
12640                 u32 addr;
12641                 __be32 data;
12642
12643                 addr = offset + i;
12644
12645                 memcpy(&data, buf + i, 4);
12646
12647                 /*
12648                  * The SEEPROM interface expects the data to always be opposite
12649                  * the native endian format.  We accomplish this by reversing
12650                  * all the operations that would have been performed on the
12651                  * data from a call to tg3_nvram_read_be32().
12652                  */
12653                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12654
12655                 val = tr32(GRC_EEPROM_ADDR);
12656                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12657
12658                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12659                         EEPROM_ADDR_READ);
12660                 tw32(GRC_EEPROM_ADDR, val |
12661                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12662                         (addr & EEPROM_ADDR_ADDR_MASK) |
12663                         EEPROM_ADDR_START |
12664                         EEPROM_ADDR_WRITE);
12665
12666                 for (j = 0; j < 1000; j++) {
12667                         val = tr32(GRC_EEPROM_ADDR);
12668
12669                         if (val & EEPROM_ADDR_COMPLETE)
12670                                 break;
12671                         msleep(1);
12672                 }
12673                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12674                         rc = -EBUSY;
12675                         break;
12676                 }
12677         }
12678
12679         return rc;
12680 }
12681
12682 /* offset and length are dword aligned */
12683 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12684                 u8 *buf)
12685 {
12686         int ret = 0;
12687         u32 pagesize = tp->nvram_pagesize;
12688         u32 pagemask = pagesize - 1;
12689         u32 nvram_cmd;
12690         u8 *tmp;
12691
12692         tmp = kmalloc(pagesize, GFP_KERNEL);
12693         if (tmp == NULL)
12694                 return -ENOMEM;
12695
12696         while (len) {
12697                 int j;
12698                 u32 phy_addr, page_off, size;
12699
12700                 phy_addr = offset & ~pagemask;
12701
12702                 for (j = 0; j < pagesize; j += 4) {
12703                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12704                                                   (__be32 *) (tmp + j));
12705                         if (ret)
12706                                 break;
12707                 }
12708                 if (ret)
12709                         break;
12710
12711                 page_off = offset & pagemask;
12712                 size = pagesize;
12713                 if (len < size)
12714                         size = len;
12715
12716                 len -= size;
12717
12718                 memcpy(tmp + page_off, buf, size);
12719
12720                 offset = offset + (pagesize - page_off);
12721
12722                 tg3_enable_nvram_access(tp);
12723
12724                 /*
12725                  * Before we can erase the flash page, we need
12726                  * to issue a special "write enable" command.
12727                  */
12728                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12729
12730                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12731                         break;
12732
12733                 /* Erase the target page */
12734                 tw32(NVRAM_ADDR, phy_addr);
12735
12736                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12737                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12738
12739                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12740                         break;
12741
12742                 /* Issue another write enable to start the write. */
12743                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12744
12745                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12746                         break;
12747
12748                 for (j = 0; j < pagesize; j += 4) {
12749                         __be32 data;
12750
12751                         data = *((__be32 *) (tmp + j));
12752
12753                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12754
12755                         tw32(NVRAM_ADDR, phy_addr + j);
12756
12757                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12758                                 NVRAM_CMD_WR;
12759
12760                         if (j == 0)
12761                                 nvram_cmd |= NVRAM_CMD_FIRST;
12762                         else if (j == (pagesize - 4))
12763                                 nvram_cmd |= NVRAM_CMD_LAST;
12764
12765                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12766                                 break;
12767                 }
12768                 if (ret)
12769                         break;
12770         }
12771
12772         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12773         tg3_nvram_exec_cmd(tp, nvram_cmd);
12774
12775         kfree(tmp);
12776
12777         return ret;
12778 }
12779
12780 /* offset and length are dword aligned */
12781 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12782                 u8 *buf)
12783 {
12784         int i, ret = 0;
12785
12786         for (i = 0; i < len; i += 4, offset += 4) {
12787                 u32 page_off, phy_addr, nvram_cmd;
12788                 __be32 data;
12789
12790                 memcpy(&data, buf + i, 4);
12791                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12792
12793                 page_off = offset % tp->nvram_pagesize;
12794
12795                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12796
12797                 tw32(NVRAM_ADDR, phy_addr);
12798
12799                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12800
12801                 if (page_off == 0 || i == 0)
12802                         nvram_cmd |= NVRAM_CMD_FIRST;
12803                 if (page_off == (tp->nvram_pagesize - 4))
12804                         nvram_cmd |= NVRAM_CMD_LAST;
12805
12806                 if (i == (len - 4))
12807                         nvram_cmd |= NVRAM_CMD_LAST;
12808
12809                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12810                     !tg3_flag(tp, 5755_PLUS) &&
12811                     (tp->nvram_jedecnum == JEDEC_ST) &&
12812                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12813
12814                         if ((ret = tg3_nvram_exec_cmd(tp,
12815                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12816                                 NVRAM_CMD_DONE)))
12817
12818                                 break;
12819                 }
12820                 if (!tg3_flag(tp, FLASH)) {
12821                         /* We always do complete word writes to eeprom. */
12822                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12823                 }
12824
12825                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12826                         break;
12827         }
12828         return ret;
12829 }
12830
12831 /* offset and length are dword aligned */
12832 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12833 {
12834         int ret;
12835
12836         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12837                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12838                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12839                 udelay(40);
12840         }
12841
12842         if (!tg3_flag(tp, NVRAM)) {
12843                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12844         } else {
12845                 u32 grc_mode;
12846
12847                 ret = tg3_nvram_lock(tp);
12848                 if (ret)
12849                         return ret;
12850
12851                 tg3_enable_nvram_access(tp);
12852                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12853                         tw32(NVRAM_WRITE1, 0x406);
12854
12855                 grc_mode = tr32(GRC_MODE);
12856                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12857
12858                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12859                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12860                                 buf);
12861                 } else {
12862                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12863                                 buf);
12864                 }
12865
12866                 grc_mode = tr32(GRC_MODE);
12867                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12868
12869                 tg3_disable_nvram_access(tp);
12870                 tg3_nvram_unlock(tp);
12871         }
12872
12873         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12874                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12875                 udelay(40);
12876         }
12877
12878         return ret;
12879 }
12880
/* One board entry mapping a PCI (subsystem vendor, subsystem device)
 * pair to the PHY ID that board carries.  A phy_id of 0 appears to mark
 * boards without a known copper PHY (fiber variants) -- confirm against
 * the users of tg3_lookup_by_subsys().
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};
12885
/* Static table of known OEM boards, keyed by PCI subsystem IDs and
 * scanned linearly by tg3_lookup_by_subsys().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
12949
12950 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12951 {
12952         int i;
12953
12954         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12955                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12956                      tp->pdev->subsystem_vendor) &&
12957                     (subsys_id_to_phy_id[i].subsys_devid ==
12958                      tp->pdev->subsystem_device))
12959                         return &subsys_id_to_phy_id[i];
12960         }
12961         return NULL;
12962 }
12963
/* Read the board's hardware configuration out of NIC SRAM (populated by
 * bootcode from the EEPROM/NVRAM) and translate it into PHY ID, LED mode,
 * WOL/ASF/APE capability flags and assorted PHY workaround flags.
 *
 * 5906 parts carry the equivalent information in the VCPU config shadow
 * register instead of SRAM and take the early path below.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Non-LOM 5906 parts are NICs with writable EEPROM. */
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	/* Only trust SRAM contents if bootcode left its signature. */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		/* CFG_2 only exists on newer chips with a sane version. */
		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM PHY ID words into the driver's format. */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		/* 5750+ keeps the LED mode (plus Shasta extensions) in
		 * CFG_2; older chips keep it in the base config word.
		 */
		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			/* 5750 A0/A1 cannot drive the extra PHY LEDs. */
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED override for Dell 5700/5701 boards. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* Specific Arima boards mis-set the WP strap. */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		/* Serdes boards must explicitly advertise fiber WOL. */
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Propagate the final WOL state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
13163
13164 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13165 {
13166         int i;
13167         u32 val;
13168
13169         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13170         tw32(OTP_CTRL, cmd);
13171
13172         /* Wait for up to 1 ms for command to execute. */
13173         for (i = 0; i < 100; i++) {
13174                 val = tr32(OTP_STATUS);
13175                 if (val & OTP_STATUS_CMD_DONE)
13176                         break;
13177                 udelay(10);
13178         }
13179
13180         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13181 }
13182
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 (treated as "no valid OTP config" by the caller) if any
 * step of the OTP command sequence fails.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register interface. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	/* First read: word containing the top half of the config. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	/* Second read: adjacent word with the bottom half. */
	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Merge: low 16 bits of the first word become the high half,
	 * high 16 bits of the second word become the low half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
13212
13213 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13214 {
13215         u32 adv = ADVERTISED_Autoneg |
13216                   ADVERTISED_Pause;
13217
13218         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13219                 adv |= ADVERTISED_1000baseT_Half |
13220                        ADVERTISED_1000baseT_Full;
13221
13222         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13223                 adv |= ADVERTISED_100baseT_Half |
13224                        ADVERTISED_100baseT_Full |
13225                        ADVERTISED_10baseT_Half |
13226                        ADVERTISED_10baseT_Full |
13227                        ADVERTISED_TP;
13228         else
13229                 adv |= ADVERTISED_FIBRE;
13230
13231         tp->link_config.advertising = adv;
13232         tp->link_config.speed = SPEED_INVALID;
13233         tp->link_config.duplex = DUPLEX_INVALID;
13234         tp->link_config.autoneg = AUTONEG_ENABLE;
13235         tp->link_config.active_speed = SPEED_INVALID;
13236         tp->link_config.active_duplex = DUPLEX_INVALID;
13237         tp->link_config.orig_speed = SPEED_INVALID;
13238         tp->link_config.orig_duplex = DUPLEX_INVALID;
13239         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13240 }
13241
/* Identify the PHY attached to this NIC and bring it to a sane initial
 * state.  Sets tp->phy_id and serdes/EEE phy_flags, initializes the
 * default link configuration, and (for copper PHYs not owned by
 * management firmware) resets and restarts autonegotiation if needed.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* When phylib manages the PHY, delegate the whole probe to it. */
	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Assemble the driver's 32-bit PHY ID from the two MII
		 * ID registers (OUI bits + model + revision).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark the copper PHYs on these chips/revisions as EEE capable.
	 * NOTE(review): the specific chip/rev exclusions presumably track
	 * hardware errata -- confirm against Broadcom docs before editing.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	/* Only touch the PHY directly when no management firmware owns it. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, mask;

		/* BMSR link status is latched-low; read it twice so the
		 * second read reflects the current link state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* If the PHY is not already advertising everything we
		 * want, reprogram it and restart autonegotiation.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): deliberately called twice -- looks like a
		 * workaround for unreliable DSP writes; keep as-is.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
13355
13356 static void __devinit tg3_read_vpd(struct tg3 *tp)
13357 {
13358         u8 *vpd_data;
13359         unsigned int block_end, rosize, len;
13360         u32 vpdlen;
13361         int j, i = 0;
13362
13363         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13364         if (!vpd_data)
13365                 goto out_no_vpd;
13366
13367         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13368         if (i < 0)
13369                 goto out_not_found;
13370
13371         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13372         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13373         i += PCI_VPD_LRDT_TAG_SIZE;
13374
13375         if (block_end > vpdlen)
13376                 goto out_not_found;
13377
13378         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13379                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13380         if (j > 0) {
13381                 len = pci_vpd_info_field_size(&vpd_data[j]);
13382
13383                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13384                 if (j + len > block_end || len != 4 ||
13385                     memcmp(&vpd_data[j], "1028", 4))
13386                         goto partno;
13387
13388                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13389                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13390                 if (j < 0)
13391                         goto partno;
13392
13393                 len = pci_vpd_info_field_size(&vpd_data[j]);
13394
13395                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13396                 if (j + len > block_end)
13397                         goto partno;
13398
13399                 memcpy(tp->fw_ver, &vpd_data[j], len);
13400                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13401         }
13402
13403 partno:
13404         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13405                                       PCI_VPD_RO_KEYWORD_PARTNO);
13406         if (i < 0)
13407                 goto out_not_found;
13408
13409         len = pci_vpd_info_field_size(&vpd_data[i]);
13410
13411         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13412         if (len > TG3_BPN_SIZE ||
13413             (len + i) > vpdlen)
13414                 goto out_not_found;
13415
13416         memcpy(tp->board_part_number, &vpd_data[i], len);
13417
13418 out_not_found:
13419         kfree(vpd_data);
13420         if (tp->board_part_number[0])
13421                 return;
13422
13423 out_no_vpd:
13424         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13425                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13426                         strcpy(tp->board_part_number, "BCM5717");
13427                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13428                         strcpy(tp->board_part_number, "BCM5718");
13429                 else
13430                         goto nomatch;
13431         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13432                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13433                         strcpy(tp->board_part_number, "BCM57780");
13434                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13435                         strcpy(tp->board_part_number, "BCM57760");
13436                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13437                         strcpy(tp->board_part_number, "BCM57790");
13438                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13439                         strcpy(tp->board_part_number, "BCM57788");
13440                 else
13441                         goto nomatch;
13442         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13443                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13444                         strcpy(tp->board_part_number, "BCM57761");
13445                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13446                         strcpy(tp->board_part_number, "BCM57765");
13447                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13448                         strcpy(tp->board_part_number, "BCM57781");
13449                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13450                         strcpy(tp->board_part_number, "BCM57785");
13451                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13452                         strcpy(tp->board_part_number, "BCM57791");
13453                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13454                         strcpy(tp->board_part_number, "BCM57795");
13455                 else
13456                         goto nomatch;
13457         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13458                 strcpy(tp->board_part_number, "BCM95906");
13459         } else {
13460 nomatch:
13461                 strcpy(tp->board_part_number, "none");
13462         }
13463 }
13464
13465 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13466 {
13467         u32 val;
13468
13469         if (tg3_nvram_read(tp, offset, &val) ||
13470             (val & 0xfc000000) != 0x0c000000 ||
13471             tg3_nvram_read(tp, offset + 4, &val) ||
13472             val != 0)
13473                 return 0;
13474
13475         return 1;
13476 }
13477
/* Append the bootcode firmware version from NVRAM to tp->fw_ver.
 * Handles two layouts: a newer one where a 16-byte version string is
 * stored at an image-relative offset, and a legacy one where the
 * major/minor numbers are packed into a pointer-table word.
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* 0xc: bootcode image offset, 0x4: image start address. */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* Image magic 0x0c000000 plus a zero second word marks the
	 * newer layout (same test as tg3_fw_img_is_valid()).
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever is already in fw_ver (e.g. a VPD version). */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Bail unless a full 16-byte version string fits. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			/* Copy raw big-endian bytes: the NVRAM holds a
			 * byte-ordered string, not a host-endian word.
			 */
			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		/* Legacy layout: version packed into the BCVER entry. */
		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
13529
13530 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13531 {
13532         u32 val, major, minor;
13533
13534         /* Use native endian representation */
13535         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13536                 return;
13537
13538         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13539                 TG3_NVM_HWSB_CFG1_MAJSFT;
13540         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13541                 TG3_NVM_HWSB_CFG1_MINSFT;
13542
13543         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13544 }
13545
13546 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13547 {
13548         u32 offset, major, minor, build;
13549
13550         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13551
13552         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13553                 return;
13554
13555         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13556         case TG3_EEPROM_SB_REVISION_0:
13557                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13558                 break;
13559         case TG3_EEPROM_SB_REVISION_2:
13560                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13561                 break;
13562         case TG3_EEPROM_SB_REVISION_3:
13563                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13564                 break;
13565         case TG3_EEPROM_SB_REVISION_4:
13566                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13567                 break;
13568         case TG3_EEPROM_SB_REVISION_5:
13569                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13570                 break;
13571         case TG3_EEPROM_SB_REVISION_6:
13572                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13573                 break;
13574         default:
13575                 return;
13576         }
13577
13578         if (tg3_nvram_read(tp, offset, &val))
13579                 return;
13580
13581         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13582                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13583         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13584                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13585         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13586
13587         if (minor > 99 || build > 26)
13588                 return;
13589
13590         offset = strlen(tp->fw_ver);
13591         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13592                  " v%d.%02d", major, minor);
13593
13594         if (build > 0) {
13595                 offset = strlen(tp->fw_ver);
13596                 if (offset < TG3_VER_SIZE - 1)
13597                         tp->fw_ver[offset] = 'a' + build - 1;
13598         }
13599 }
13600
13601 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13602 {
13603         u32 val, offset, start;
13604         int i, vlen;
13605
13606         for (offset = TG3_NVM_DIR_START;
13607              offset < TG3_NVM_DIR_END;
13608              offset += TG3_NVM_DIRENT_SIZE) {
13609                 if (tg3_nvram_read(tp, offset, &val))
13610                         return;
13611
13612                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13613                         break;
13614         }
13615
13616         if (offset == TG3_NVM_DIR_END)
13617                 return;
13618
13619         if (!tg3_flag(tp, 5705_PLUS))
13620                 start = 0x08000000;
13621         else if (tg3_nvram_read(tp, offset - 4, &start))
13622                 return;
13623
13624         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13625             !tg3_fw_img_is_valid(tp, offset) ||
13626             tg3_nvram_read(tp, offset + 8, &val))
13627                 return;
13628
13629         offset += val - start;
13630
13631         vlen = strlen(tp->fw_ver);
13632
13633         tp->fw_ver[vlen++] = ',';
13634         tp->fw_ver[vlen++] = ' ';
13635
13636         for (i = 0; i < 4; i++) {
13637                 __be32 v;
13638                 if (tg3_nvram_read_be32(tp, offset, &v))
13639                         return;
13640
13641                 offset += sizeof(v);
13642
13643                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13644                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13645                         break;
13646                 }
13647
13648                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13649                 vlen += sizeof(v);
13650         }
13651 }
13652
13653 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13654 {
13655         int vlen;
13656         u32 apedata;
13657         char *fwtype;
13658
13659         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13660                 return;
13661
13662         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13663         if (apedata != APE_SEG_SIG_MAGIC)
13664                 return;
13665
13666         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13667         if (!(apedata & APE_FW_STATUS_READY))
13668                 return;
13669
13670         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13671
13672         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13673                 tg3_flag_set(tp, APE_HAS_NCSI);
13674                 fwtype = "NCSI";
13675         } else {
13676                 fwtype = "DASH";
13677         }
13678
13679         vlen = strlen(tp->fw_ver);
13680
13681         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13682                  fwtype,
13683                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13684                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13685                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13686                  (apedata & APE_FW_VERSION_BLDMSK));
13687 }
13688
13689 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13690 {
13691         u32 val;
13692         bool vpd_vers = false;
13693
13694         if (tp->fw_ver[0] != 0)
13695                 vpd_vers = true;
13696
13697         if (tg3_flag(tp, NO_NVRAM)) {
13698                 strcat(tp->fw_ver, "sb");
13699                 return;
13700         }
13701
13702         if (tg3_nvram_read(tp, 0, &val))
13703                 return;
13704
13705         if (val == TG3_EEPROM_MAGIC)
13706                 tg3_read_bc_ver(tp);
13707         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13708                 tg3_read_sb_ver(tp, val);
13709         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13710                 tg3_read_hwsb_ver(tp);
13711         else
13712                 return;
13713
13714         if (vpd_vers)
13715                 goto done;
13716
13717         if (tg3_flag(tp, ENABLE_APE)) {
13718                 if (tg3_flag(tp, ENABLE_ASF))
13719                         tg3_read_dash_ver(tp);
13720         } else if (tg3_flag(tp, ENABLE_ASF)) {
13721                 tg3_read_mgmtfw_ver(tp);
13722         }
13723
13724 done:
13725         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13726 }
13727
13728 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13729
13730 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13731 {
13732         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13733                 return TG3_RX_RET_MAX_SIZE_5717;
13734         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13735                 return TG3_RX_RET_MAX_SIZE_5700;
13736         else
13737                 return TG3_RX_RET_MAX_SIZE_5705;
13738 }
13739
/* Host bridges matched against this table elsewhere in the driver;
 * the name suggests bridges known to reorder posted PCI writes --
 * NOTE(review): the consuming code is outside this chunk, confirm
 * there before relying on that description.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
13746
13747 static int __devinit tg3_get_invariants(struct tg3 *tp)
13748 {
13749         u32 misc_ctrl_reg;
13750         u32 pci_state_reg, grc_misc_cfg;
13751         u32 val;
13752         u16 pci_cmd;
13753         int err;
13754
13755         /* Force memory write invalidate off.  If we leave it on,
13756          * then on 5700_BX chips we have to enable a workaround.
13757          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13758          * to match the cacheline size.  The Broadcom driver have this
13759          * workaround but turns MWI off all the times so never uses
13760          * it.  This seems to suggest that the workaround is insufficient.
13761          */
13762         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13763         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13764         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13765
13766         /* Important! -- Make sure register accesses are byteswapped
13767          * correctly.  Also, for those chips that require it, make
13768          * sure that indirect register accesses are enabled before
13769          * the first operation.
13770          */
13771         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13772                               &misc_ctrl_reg);
13773         tp->misc_host_ctrl |= (misc_ctrl_reg &
13774                                MISC_HOST_CTRL_CHIPREV);
13775         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13776                                tp->misc_host_ctrl);
13777
13778         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13779                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13780         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13781                 u32 prod_id_asic_rev;
13782
13783                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13784                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13785                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13786                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13787                         pci_read_config_dword(tp->pdev,
13788                                               TG3PCI_GEN2_PRODID_ASICREV,
13789                                               &prod_id_asic_rev);
13790                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13791                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13792                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13793                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13794                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13795                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13796                         pci_read_config_dword(tp->pdev,
13797                                               TG3PCI_GEN15_PRODID_ASICREV,
13798                                               &prod_id_asic_rev);
13799                 else
13800                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13801                                               &prod_id_asic_rev);
13802
13803                 tp->pci_chip_rev_id = prod_id_asic_rev;
13804         }
13805
13806         /* Wrong chip ID in 5752 A0. This code can be removed later
13807          * as A0 is not in production.
13808          */
13809         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13810                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13811
13812         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13813          * we need to disable memory and use config. cycles
13814          * only to access all registers. The 5702/03 chips
13815          * can mistakenly decode the special cycles from the
13816          * ICH chipsets as memory write cycles, causing corruption
13817          * of register and memory space. Only certain ICH bridges
13818          * will drive special cycles with non-zero data during the
13819          * address phase which can fall within the 5703's address
13820          * range. This is not an ICH bug as the PCI spec allows
13821          * non-zero address during special cycles. However, only
13822          * these ICH bridges are known to drive non-zero addresses
13823          * during special cycles.
13824          *
13825          * Since special cycles do not cross PCI bridges, we only
13826          * enable this workaround if the 5703 is on the secondary
13827          * bus of these ICH bridges.
13828          */
13829         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13830             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13831                 static struct tg3_dev_id {
13832                         u32     vendor;
13833                         u32     device;
13834                         u32     rev;
13835                 } ich_chipsets[] = {
13836                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13837                           PCI_ANY_ID },
13838                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13839                           PCI_ANY_ID },
13840                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13841                           0xa },
13842                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13843                           PCI_ANY_ID },
13844                         { },
13845                 };
13846                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13847                 struct pci_dev *bridge = NULL;
13848
13849                 while (pci_id->vendor != 0) {
13850                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13851                                                 bridge);
13852                         if (!bridge) {
13853                                 pci_id++;
13854                                 continue;
13855                         }
13856                         if (pci_id->rev != PCI_ANY_ID) {
13857                                 if (bridge->revision > pci_id->rev)
13858                                         continue;
13859                         }
13860                         if (bridge->subordinate &&
13861                             (bridge->subordinate->number ==
13862                              tp->pdev->bus->number)) {
13863                                 tg3_flag_set(tp, ICH_WORKAROUND);
13864                                 pci_dev_put(bridge);
13865                                 break;
13866                         }
13867                 }
13868         }
13869
13870         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13871                 static struct tg3_dev_id {
13872                         u32     vendor;
13873                         u32     device;
13874                 } bridge_chipsets[] = {
13875                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13876                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13877                         { },
13878                 };
13879                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13880                 struct pci_dev *bridge = NULL;
13881
13882                 while (pci_id->vendor != 0) {
13883                         bridge = pci_get_device(pci_id->vendor,
13884                                                 pci_id->device,
13885                                                 bridge);
13886                         if (!bridge) {
13887                                 pci_id++;
13888                                 continue;
13889                         }
13890                         if (bridge->subordinate &&
13891                             (bridge->subordinate->number <=
13892                              tp->pdev->bus->number) &&
13893                             (bridge->subordinate->subordinate >=
13894                              tp->pdev->bus->number)) {
13895                                 tg3_flag_set(tp, 5701_DMA_BUG);
13896                                 pci_dev_put(bridge);
13897                                 break;
13898                         }
13899                 }
13900         }
13901
13902         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13903          * DMA addresses > 40-bit. This bridge may have other additional
13904          * 57xx devices behind it in some 4-port NIC designs for example.
13905          * Any tg3 device found behind the bridge will also need the 40-bit
13906          * DMA workaround.
13907          */
13908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13909             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13910                 tg3_flag_set(tp, 5780_CLASS);
13911                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13912                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13913         } else {
13914                 struct pci_dev *bridge = NULL;
13915
13916                 do {
13917                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13918                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13919                                                 bridge);
13920                         if (bridge && bridge->subordinate &&
13921                             (bridge->subordinate->number <=
13922                              tp->pdev->bus->number) &&
13923                             (bridge->subordinate->subordinate >=
13924                              tp->pdev->bus->number)) {
13925                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13926                                 pci_dev_put(bridge);
13927                                 break;
13928                         }
13929                 } while (bridge);
13930         }
13931
13932         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13933             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13934                 tp->pdev_peer = tg3_find_peer(tp);
13935
13936         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13937             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13938             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13939                 tg3_flag_set(tp, 5717_PLUS);
13940
13941         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13942             tg3_flag(tp, 5717_PLUS))
13943                 tg3_flag_set(tp, 57765_PLUS);
13944
13945         /* Intentionally exclude ASIC_REV_5906 */
13946         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13947             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13948             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13950             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13951             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13952             tg3_flag(tp, 57765_PLUS))
13953                 tg3_flag_set(tp, 5755_PLUS);
13954
13955         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13956             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13957             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13958             tg3_flag(tp, 5755_PLUS) ||
13959             tg3_flag(tp, 5780_CLASS))
13960                 tg3_flag_set(tp, 5750_PLUS);
13961
13962         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13963             tg3_flag(tp, 5750_PLUS))
13964                 tg3_flag_set(tp, 5705_PLUS);
13965
13966         /* Determine TSO capabilities */
13967         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
13968                 ; /* Do nothing. HW bug. */
13969         else if (tg3_flag(tp, 57765_PLUS))
13970                 tg3_flag_set(tp, HW_TSO_3);
13971         else if (tg3_flag(tp, 5755_PLUS) ||
13972                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13973                 tg3_flag_set(tp, HW_TSO_2);
13974         else if (tg3_flag(tp, 5750_PLUS)) {
13975                 tg3_flag_set(tp, HW_TSO_1);
13976                 tg3_flag_set(tp, TSO_BUG);
13977                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13978                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13979                         tg3_flag_clear(tp, TSO_BUG);
13980         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13981                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13982                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13983                         tg3_flag_set(tp, TSO_BUG);
13984                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13985                         tp->fw_needed = FIRMWARE_TG3TSO5;
13986                 else
13987                         tp->fw_needed = FIRMWARE_TG3TSO;
13988         }
13989
13990         /* Selectively allow TSO based on operating conditions */
13991         if (tg3_flag(tp, HW_TSO_1) ||
13992             tg3_flag(tp, HW_TSO_2) ||
13993             tg3_flag(tp, HW_TSO_3) ||
13994             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13995                 tg3_flag_set(tp, TSO_CAPABLE);
13996         else {
13997                 tg3_flag_clear(tp, TSO_CAPABLE);
13998                 tg3_flag_clear(tp, TSO_BUG);
13999                 tp->fw_needed = NULL;
14000         }
14001
14002         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14003                 tp->fw_needed = FIRMWARE_TG3;
14004
14005         tp->irq_max = 1;
14006
14007         if (tg3_flag(tp, 5750_PLUS)) {
14008                 tg3_flag_set(tp, SUPPORT_MSI);
14009                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14010                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14011                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14012                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14013                      tp->pdev_peer == tp->pdev))
14014                         tg3_flag_clear(tp, SUPPORT_MSI);
14015
14016                 if (tg3_flag(tp, 5755_PLUS) ||
14017                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14018                         tg3_flag_set(tp, 1SHOT_MSI);
14019                 }
14020
14021                 if (tg3_flag(tp, 57765_PLUS)) {
14022                         tg3_flag_set(tp, SUPPORT_MSIX);
14023                         tp->irq_max = TG3_IRQ_MAX_VECS;
14024                 }
14025         }
14026
14027         if (tg3_flag(tp, 5755_PLUS))
14028                 tg3_flag_set(tp, SHORT_DMA_BUG);
14029
14030         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14031                 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14032
14033         if (tg3_flag(tp, 5717_PLUS))
14034                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14035
14036         if (tg3_flag(tp, 57765_PLUS) &&
14037             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14038                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14039
14040         if (!tg3_flag(tp, 5705_PLUS) ||
14041             tg3_flag(tp, 5780_CLASS) ||
14042             tg3_flag(tp, USE_JUMBO_BDFLAG))
14043                 tg3_flag_set(tp, JUMBO_CAPABLE);
14044
14045         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14046                               &pci_state_reg);
14047
14048         if (pci_is_pcie(tp->pdev)) {
14049                 u16 lnkctl;
14050
14051                 tg3_flag_set(tp, PCI_EXPRESS);
14052
14053                 tp->pcie_readrq = 4096;
14054                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14055                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14056                         tp->pcie_readrq = 2048;
14057
14058                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14059
14060                 pci_read_config_word(tp->pdev,
14061                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14062                                      &lnkctl);
14063                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14064                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14065                             ASIC_REV_5906) {
14066                                 tg3_flag_clear(tp, HW_TSO_2);
14067                                 tg3_flag_clear(tp, TSO_CAPABLE);
14068                         }
14069                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14070                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14071                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14072                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14073                                 tg3_flag_set(tp, CLKREQ_BUG);
14074                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14075                         tg3_flag_set(tp, L1PLLPD_EN);
14076                 }
14077         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14078                 /* BCM5785 devices are effectively PCIe devices, and should
14079                  * follow PCIe codepaths, but do not have a PCIe capabilities
14080                  * section.
14081                  */
14082                 tg3_flag_set(tp, PCI_EXPRESS);
14083         } else if (!tg3_flag(tp, 5705_PLUS) ||
14084                    tg3_flag(tp, 5780_CLASS)) {
14085                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14086                 if (!tp->pcix_cap) {
14087                         dev_err(&tp->pdev->dev,
14088                                 "Cannot find PCI-X capability, aborting\n");
14089                         return -EIO;
14090                 }
14091
14092                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14093                         tg3_flag_set(tp, PCIX_MODE);
14094         }
14095
14096         /* If we have an AMD 762 or VIA K8T800 chipset, write
14097          * reordering to the mailbox registers done by the host
14098          * controller can cause major troubles.  We read back from
14099          * every mailbox register write to force the writes to be
14100          * posted to the chip in order.
14101          */
14102         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14103             !tg3_flag(tp, PCI_EXPRESS))
14104                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14105
14106         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14107                              &tp->pci_cacheline_sz);
14108         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14109                              &tp->pci_lat_timer);
14110         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14111             tp->pci_lat_timer < 64) {
14112                 tp->pci_lat_timer = 64;
14113                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14114                                       tp->pci_lat_timer);
14115         }
14116
14117         /* Important! -- It is critical that the PCI-X hw workaround
14118          * situation is decided before the first MMIO register access.
14119          */
14120         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14121                 /* 5700 BX chips need to have their TX producer index
14122                  * mailboxes written twice to workaround a bug.
14123                  */
14124                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14125
14126                 /* If we are in PCI-X mode, enable register write workaround.
14127                  *
14128                  * The workaround is to use indirect register accesses
14129                  * for all chip writes not to mailbox registers.
14130                  */
14131                 if (tg3_flag(tp, PCIX_MODE)) {
14132                         u32 pm_reg;
14133
14134                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14135
14136                         /* The chip can have it's power management PCI config
14137                          * space registers clobbered due to this bug.
14138                          * So explicitly force the chip into D0 here.
14139                          */
14140                         pci_read_config_dword(tp->pdev,
14141                                               tp->pm_cap + PCI_PM_CTRL,
14142                                               &pm_reg);
14143                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14144                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14145                         pci_write_config_dword(tp->pdev,
14146                                                tp->pm_cap + PCI_PM_CTRL,
14147                                                pm_reg);
14148
14149                         /* Also, force SERR#/PERR# in PCI command. */
14150                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14151                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14152                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14153                 }
14154         }
14155
14156         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14157                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14158         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14159                 tg3_flag_set(tp, PCI_32BIT);
14160
14161         /* Chip-specific fixup from Broadcom driver */
14162         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14163             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14164                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14165                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14166         }
14167
14168         /* Default fast path register access methods */
14169         tp->read32 = tg3_read32;
14170         tp->write32 = tg3_write32;
14171         tp->read32_mbox = tg3_read32;
14172         tp->write32_mbox = tg3_write32;
14173         tp->write32_tx_mbox = tg3_write32;
14174         tp->write32_rx_mbox = tg3_write32;
14175
14176         /* Various workaround register access methods */
14177         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14178                 tp->write32 = tg3_write_indirect_reg32;
14179         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14180                  (tg3_flag(tp, PCI_EXPRESS) &&
14181                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14182                 /*
14183                  * Back to back register writes can cause problems on these
14184                  * chips, the workaround is to read back all reg writes
14185                  * except those to mailbox regs.
14186                  *
14187                  * See tg3_write_indirect_reg32().
14188                  */
14189                 tp->write32 = tg3_write_flush_reg32;
14190         }
14191
14192         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14193                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14194                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14195                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14196         }
14197
14198         if (tg3_flag(tp, ICH_WORKAROUND)) {
14199                 tp->read32 = tg3_read_indirect_reg32;
14200                 tp->write32 = tg3_write_indirect_reg32;
14201                 tp->read32_mbox = tg3_read_indirect_mbox;
14202                 tp->write32_mbox = tg3_write_indirect_mbox;
14203                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14204                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14205
14206                 iounmap(tp->regs);
14207                 tp->regs = NULL;
14208
14209                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14210                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14211                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14212         }
14213         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14214                 tp->read32_mbox = tg3_read32_mbox_5906;
14215                 tp->write32_mbox = tg3_write32_mbox_5906;
14216                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14217                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14218         }
14219
14220         if (tp->write32 == tg3_write_indirect_reg32 ||
14221             (tg3_flag(tp, PCIX_MODE) &&
14222              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14223               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14224                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14225
14226         /* The memory arbiter has to be enabled in order for SRAM accesses
14227          * to succeed.  Normally on powerup the tg3 chip firmware will make
14228          * sure it is enabled, but other entities such as system netboot
14229          * code might disable it.
14230          */
14231         val = tr32(MEMARB_MODE);
14232         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14233
14234         if (tg3_flag(tp, PCIX_MODE)) {
14235                 pci_read_config_dword(tp->pdev,
14236                                       tp->pcix_cap + PCI_X_STATUS, &val);
14237                 tp->pci_fn = val & 0x7;
14238         } else {
14239                 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14240         }
14241
14242         /* Get eeprom hw config before calling tg3_set_power_state().
14243          * In particular, the TG3_FLAG_IS_NIC flag must be
14244          * determined before calling tg3_set_power_state() so that
14245          * we know whether or not to switch out of Vaux power.
14246          * When the flag is set, it means that GPIO1 is used for eeprom
14247          * write protect and also implies that it is a LOM where GPIOs
14248          * are not used to switch power.
14249          */
14250         tg3_get_eeprom_hw_cfg(tp);
14251
14252         if (tg3_flag(tp, ENABLE_APE)) {
14253                 /* Allow reads and writes to the
14254                  * APE register and memory space.
14255                  */
14256                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14257                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14258                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14259                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14260                                        pci_state_reg);
14261
14262                 tg3_ape_lock_init(tp);
14263         }
14264
14265         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14266             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14267             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14268             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14269             tg3_flag(tp, 57765_PLUS))
14270                 tg3_flag_set(tp, CPMU_PRESENT);
14271
14272         /* Set up tp->grc_local_ctrl before calling
14273          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14274          * will bring 5700's external PHY out of reset.
14275          * It is also used as eeprom write protect on LOMs.
14276          */
14277         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14278         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14279             tg3_flag(tp, EEPROM_WRITE_PROT))
14280                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14281                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14282         /* Unused GPIO3 must be driven as output on 5752 because there
14283          * are no pull-up resistors on unused GPIO pins.
14284          */
14285         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14286                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14287
14288         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14289             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14290             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14291                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14292
14293         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14294             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14295                 /* Turn off the debug UART. */
14296                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14297                 if (tg3_flag(tp, IS_NIC))
14298                         /* Keep VMain power. */
14299                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14300                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14301         }
14302
14303         /* Switch out of Vaux if it is a NIC */
14304         tg3_pwrsrc_switch_to_vmain(tp);
14305
14306         /* Derive initial jumbo mode from MTU assigned in
14307          * ether_setup() via the alloc_etherdev() call
14308          */
14309         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14310                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14311
14312         /* Determine WakeOnLan speed to use. */
14313         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14314             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14315             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14316             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14317                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14318         } else {
14319                 tg3_flag_set(tp, WOL_SPEED_100MB);
14320         }
14321
14322         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14323                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14324
14325         /* A few boards don't want Ethernet@WireSpeed phy feature */
14326         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14327             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14328              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14329              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14330             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14331             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14332                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14333
14334         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14335             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14336                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14337         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14338                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14339
14340         if (tg3_flag(tp, 5705_PLUS) &&
14341             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14342             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14343             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14344             !tg3_flag(tp, 57765_PLUS)) {
14345                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14346                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14347                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14348                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14349                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14350                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14351                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14352                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14353                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14354                 } else
14355                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14356         }
14357
14358         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14359             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14360                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14361                 if (tp->phy_otp == 0)
14362                         tp->phy_otp = TG3_OTP_DEFAULT;
14363         }
14364
14365         if (tg3_flag(tp, CPMU_PRESENT))
14366                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14367         else
14368                 tp->mi_mode = MAC_MI_MODE_BASE;
14369
14370         tp->coalesce_mode = 0;
14371         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14372             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14373                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14374
14375         /* Set these bits to enable statistics workaround. */
14376         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14377             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14378             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14379                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14380                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14381         }
14382
14383         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14384             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14385                 tg3_flag_set(tp, USE_PHYLIB);
14386
14387         err = tg3_mdio_init(tp);
14388         if (err)
14389                 return err;
14390
14391         /* Initialize data/descriptor byte/word swapping. */
14392         val = tr32(GRC_MODE);
14393         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14394                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14395                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14396                         GRC_MODE_B2HRX_ENABLE |
14397                         GRC_MODE_HTX2B_ENABLE |
14398                         GRC_MODE_HOST_STACKUP);
14399         else
14400                 val &= GRC_MODE_HOST_STACKUP;
14401
14402         tw32(GRC_MODE, val | tp->grc_mode);
14403
14404         tg3_switch_clocks(tp);
14405
14406         /* Clear this out for sanity. */
14407         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14408
14409         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14410                               &pci_state_reg);
14411         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14412             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14413                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14414
14415                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14416                     chiprevid == CHIPREV_ID_5701_B0 ||
14417                     chiprevid == CHIPREV_ID_5701_B2 ||
14418                     chiprevid == CHIPREV_ID_5701_B5) {
14419                         void __iomem *sram_base;
14420
14421                         /* Write some dummy words into the SRAM status block
14422                          * area, see if it reads back correctly.  If the return
14423                          * value is bad, force enable the PCIX workaround.
14424                          */
14425                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14426
14427                         writel(0x00000000, sram_base);
14428                         writel(0x00000000, sram_base + 4);
14429                         writel(0xffffffff, sram_base + 4);
14430                         if (readl(sram_base) != 0x00000000)
14431                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14432                 }
14433         }
14434
14435         udelay(50);
14436         tg3_nvram_init(tp);
14437
14438         grc_misc_cfg = tr32(GRC_MISC_CFG);
14439         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14440
14441         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14442             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14443              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14444                 tg3_flag_set(tp, IS_5788);
14445
14446         if (!tg3_flag(tp, IS_5788) &&
14447             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14448                 tg3_flag_set(tp, TAGGED_STATUS);
14449         if (tg3_flag(tp, TAGGED_STATUS)) {
14450                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14451                                       HOSTCC_MODE_CLRTICK_TXBD);
14452
14453                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14454                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14455                                        tp->misc_host_ctrl);
14456         }
14457
14458         /* Preserve the APE MAC_MODE bits */
14459         if (tg3_flag(tp, ENABLE_APE))
14460                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14461         else
14462                 tp->mac_mode = 0;
14463
14464         /* these are limited to 10/100 only */
14465         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14466              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14467             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14468              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14469              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14470               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14471               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14472             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14473              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14474               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14475               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14476             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14477             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14478             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14479             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14480                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14481
14482         err = tg3_phy_probe(tp);
14483         if (err) {
14484                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14485                 /* ... but do not return immediately ... */
14486                 tg3_mdio_fini(tp);
14487         }
14488
14489         tg3_read_vpd(tp);
14490         tg3_read_fw_ver(tp);
14491
14492         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14493                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14494         } else {
14495                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14496                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14497                 else
14498                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14499         }
14500
14501         /* 5700 {AX,BX} chips have a broken status block link
14502          * change bit implementation, so we must use the
14503          * status register in those cases.
14504          */
14505         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14506                 tg3_flag_set(tp, USE_LINKCHG_REG);
14507         else
14508                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14509
14510         /* The led_ctrl is set during tg3_phy_probe, here we might
14511          * have to force the link status polling mechanism based
14512          * upon subsystem IDs.
14513          */
14514         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14515             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14516             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14517                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14518                 tg3_flag_set(tp, USE_LINKCHG_REG);
14519         }
14520
14521         /* For all SERDES we poll the MAC status register. */
14522         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14523                 tg3_flag_set(tp, POLL_SERDES);
14524         else
14525                 tg3_flag_clear(tp, POLL_SERDES);
14526
14527         tp->rx_offset = NET_IP_ALIGN;
14528         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14529         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14530             tg3_flag(tp, PCIX_MODE)) {
14531                 tp->rx_offset = 0;
14532 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14533                 tp->rx_copy_thresh = ~(u16)0;
14534 #endif
14535         }
14536
14537         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14538         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14539         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14540
14541         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14542
14543         /* Increment the rx prod index on the rx std ring by at most
14544          * 8 for these chips to workaround hw errata.
14545          */
14546         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14547             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14548             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14549                 tp->rx_std_max_post = 8;
14550
14551         if (tg3_flag(tp, ASPM_WORKAROUND))
14552                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14553                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14554
14555         return err;
14556 }
14557
14558 #ifdef CONFIG_SPARC
14559 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14560 {
14561         struct net_device *dev = tp->dev;
14562         struct pci_dev *pdev = tp->pdev;
14563         struct device_node *dp = pci_device_to_OF_node(pdev);
14564         const unsigned char *addr;
14565         int len;
14566
14567         addr = of_get_property(dp, "local-mac-address", &len);
14568         if (addr && len == 6) {
14569                 memcpy(dev->dev_addr, addr, 6);
14570                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14571                 return 0;
14572         }
14573         return -ENODEV;
14574 }
14575
14576 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14577 {
14578         struct net_device *dev = tp->dev;
14579
14580         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14581         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14582         return 0;
14583 }
14584 #endif
14585
/* Determine the device MAC address, trying in order: the OF property
 * (SPARC only), the NIC SRAM mailbox written by bootcode, NVRAM at a
 * function-dependent offset, and finally the live MAC address
 * registers.  Returns 0 on success, -EINVAL if no valid (unicast,
 * non-zero) address was found anywhere.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* NVRAM offset of the MAC address.  Multi-function parts keep a
	 * separate address per PCI function at different offsets.
	 */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		/* Reset the NVRAM interface if we could not get the lock */
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b ("HK") in the upper half marks a populated mailbox */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* Big-endian words: address is the last 2 bytes of
			 * hi followed by all 4 bytes of lo.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
14661
14662 #define BOUNDARY_SINGLE_CACHELINE       1
14663 #define BOUNDARY_MULTI_CACHELINE        2
14664
/* Compute the DMA read/write boundary bits for the DMA_RW_CTRL value
 * in @val, based on the host bridge cache line size and the bus type
 * (plain PCI, PCI-X or PCI Express).  Returns @val with the boundary
 * bits OR'd in; @val is returned unchanged on chips/platforms where
 * the boundary bits are not used.
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI cache line size register is in 32-bit words; a value of 0
	 * is treated as 1024 bytes here.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

	/* Per-architecture policy: platforms whose PCI hosts are hurt by
	 * bursts crossing cache lines choose a boundary goal; all others
	 * leave the hardware default (goal == 0).
	 */
#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	/* 57765+ only has a single cache-alignment disable bit */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe only has write-side boundary controls */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Plain PCI: match the boundary to the cache line size
		 * when a single-cacheline goal is requested.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
14805
/* Run one DMA transfer of @size bytes between the host buffer
 * (@buf/@buf_dma) and NIC-internal SRAM using a hand-built internal
 * buffer descriptor.  @to_device selects the direction: non-zero uses
 * the read-DMA engine (host -> NIC), zero the write-DMA engine
 * (NIC -> host).  Returns 0 when the completion FIFO reports the
 * descriptor within the 40 * 100us poll window, -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear the completion FIFOs and DMA status registers */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Descriptor points at the host buffer on one side and NIC
	 * mbuf 0x2100 on the other.
	 */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
	 * through the PCI memory window.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll for completion: up to 40 iterations of 100us each */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
14885
/* Size of the coherent buffer used by the DMA loopback test */
#define TEST_BUFFER_SIZE        0x2000

/* Host bridges known to need the 16-byte write boundary workaround
 * even though they pass the DMA test (see tg3_test_dma()).
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
14892
/* Select and verify the DMA read/write control settings.  Programs
 * tp->dma_rwctrl with bus/chip-specific watermarks and boundary bits,
 * then, on 5700/5701 only, runs a loopback DMA test against a
 * coherent buffer with the maximum write burst and tightens the write
 * boundary to 16 bytes if read-back corruption is detected.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Base PCI command codes for DMA reads and writes */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	/* Watermark setup depends on the bus the chip sits on */
	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 get the loopback DMA verification below */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Loop: write a counting pattern to the chip, read it back and
	 * verify.  On mismatch, tighten the write boundary to 16 bytes
	 * once and retry; a second mismatch is a hard failure.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
15082
15083 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15084 {
15085         if (tg3_flag(tp, 57765_PLUS)) {
15086                 tp->bufmgr_config.mbuf_read_dma_low_water =
15087                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15088                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15089                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15090                 tp->bufmgr_config.mbuf_high_water =
15091                         DEFAULT_MB_HIGH_WATER_57765;
15092
15093                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15094                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15095                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15096                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15097                 tp->bufmgr_config.mbuf_high_water_jumbo =
15098                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15099         } else if (tg3_flag(tp, 5705_PLUS)) {
15100                 tp->bufmgr_config.mbuf_read_dma_low_water =
15101                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15102                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15103                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15104                 tp->bufmgr_config.mbuf_high_water =
15105                         DEFAULT_MB_HIGH_WATER_5705;
15106                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15107                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15108                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15109                         tp->bufmgr_config.mbuf_high_water =
15110                                 DEFAULT_MB_HIGH_WATER_5906;
15111                 }
15112
15113                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15114                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15115                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15116                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15117                 tp->bufmgr_config.mbuf_high_water_jumbo =
15118                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15119         } else {
15120                 tp->bufmgr_config.mbuf_read_dma_low_water =
15121                         DEFAULT_MB_RDMA_LOW_WATER;
15122                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15123                         DEFAULT_MB_MACRX_LOW_WATER;
15124                 tp->bufmgr_config.mbuf_high_water =
15125                         DEFAULT_MB_HIGH_WATER;
15126
15127                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15128                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15129                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15130                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15131                 tp->bufmgr_config.mbuf_high_water_jumbo =
15132                         DEFAULT_MB_HIGH_WATER_JUMBO;
15133         }
15134
15135         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15136         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15137 }
15138
15139 static char * __devinit tg3_phy_string(struct tg3 *tp)
15140 {
15141         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15142         case TG3_PHY_ID_BCM5400:        return "5400";
15143         case TG3_PHY_ID_BCM5401:        return "5401";
15144         case TG3_PHY_ID_BCM5411:        return "5411";
15145         case TG3_PHY_ID_BCM5701:        return "5701";
15146         case TG3_PHY_ID_BCM5703:        return "5703";
15147         case TG3_PHY_ID_BCM5704:        return "5704";
15148         case TG3_PHY_ID_BCM5705:        return "5705";
15149         case TG3_PHY_ID_BCM5750:        return "5750";
15150         case TG3_PHY_ID_BCM5752:        return "5752";
15151         case TG3_PHY_ID_BCM5714:        return "5714";
15152         case TG3_PHY_ID_BCM5780:        return "5780";
15153         case TG3_PHY_ID_BCM5755:        return "5755";
15154         case TG3_PHY_ID_BCM5787:        return "5787";
15155         case TG3_PHY_ID_BCM5784:        return "5784";
15156         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15157         case TG3_PHY_ID_BCM5906:        return "5906";
15158         case TG3_PHY_ID_BCM5761:        return "5761";
15159         case TG3_PHY_ID_BCM5718C:       return "5718C";
15160         case TG3_PHY_ID_BCM5718S:       return "5718S";
15161         case TG3_PHY_ID_BCM57765:       return "57765";
15162         case TG3_PHY_ID_BCM5719C:       return "5719C";
15163         case TG3_PHY_ID_BCM5720C:       return "5720C";
15164         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15165         case 0:                 return "serdes";
15166         default:                return "unknown";
15167         }
15168 }
15169
15170 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15171 {
15172         if (tg3_flag(tp, PCI_EXPRESS)) {
15173                 strcpy(str, "PCI Express");
15174                 return str;
15175         } else if (tg3_flag(tp, PCIX_MODE)) {
15176                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15177
15178                 strcpy(str, "PCIX:");
15179
15180                 if ((clock_ctrl == 7) ||
15181                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15182                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15183                         strcat(str, "133MHz");
15184                 else if (clock_ctrl == 0)
15185                         strcat(str, "33MHz");
15186                 else if (clock_ctrl == 2)
15187                         strcat(str, "50MHz");
15188                 else if (clock_ctrl == 4)
15189                         strcat(str, "66MHz");
15190                 else if (clock_ctrl == 6)
15191                         strcat(str, "100MHz");
15192         } else {
15193                 strcpy(str, "PCI:");
15194                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15195                         strcat(str, "66MHz");
15196                 else
15197                         strcat(str, "33MHz");
15198         }
15199         if (tg3_flag(tp, PCI_32BIT))
15200                 strcat(str, ":32-bit");
15201         else
15202                 strcat(str, ":64-bit");
15203         return str;
15204 }
15205
15206 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15207 {
15208         struct pci_dev *peer;
15209         unsigned int func, devnr = tp->pdev->devfn & ~7;
15210
15211         for (func = 0; func < 8; func++) {
15212                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15213                 if (peer && peer != tp->pdev)
15214                         break;
15215                 pci_dev_put(peer);
15216         }
15217         /* 5704 can be configured in single-port mode, set peer to
15218          * tp->pdev in that case.
15219          */
15220         if (!peer) {
15221                 peer = tp->pdev;
15222                 return peer;
15223         }
15224
15225         /*
15226          * We don't need to keep the refcount elevated; there's no way
15227          * to remove one half of this device without removing the other
15228          */
15229         pci_dev_put(peer);
15230
15231         return peer;
15232 }
15233
/* Initialize the default ethtool interrupt-coalescing parameters in
 * tp->coal.  Chips whose host coalescing engine clears the tick
 * counters on BD events (HOSTCC_MODE_CLRTICK_*) use the *_CLRTCKS
 * tick values; on 5705+ parts the per-irq tick limits and the stats
 * block interval are forced to zero.
 */
static void __devinit tg3_init_coal(struct tg3 *tp)
{
	struct ethtool_coalesce *ec = &tp->coal;

	memset(ec, 0, sizeof(*ec));
	ec->cmd = ETHTOOL_GCOALESCE;
	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;

	/* Tick-clearing chips get the alternate tick defaults */
	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
				 HOSTCC_MODE_CLRTICK_TXBD)) {
		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}

	/* 5705 and newer: zero the irq tick and stats-block intervals */
	if (tg3_flag(tp, 5705_PLUS)) {
		ec->rx_coalesce_usecs_irq = 0;
		ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
15264
/* net_device callback table for the tg3 driver */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open               = tg3_open,
	.ndo_stop               = tg3_close,
	.ndo_start_xmit         = tg3_start_xmit,
	.ndo_get_stats64        = tg3_get_stats64,
	.ndo_validate_addr      = eth_validate_addr,
	.ndo_set_rx_mode        = tg3_set_rx_mode,
	.ndo_set_mac_address    = tg3_set_mac_addr,
	.ndo_do_ioctl           = tg3_ioctl,
	.ndo_tx_timeout         = tg3_tx_timeout,
	.ndo_change_mtu         = tg3_change_mtu,
	.ndo_fix_features       = tg3_fix_features,
	.ndo_set_features       = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	/* netconsole / netpoll support */
	.ndo_poll_controller    = tg3_poll_controller,
#endif
};
15282
/* tg3_init_one() - PCI probe callback for a Tigon3 device.
 * @pdev: PCI device being probed
 * @ent:  matching entry from tg3_pci_tbl (unused directly here)
 *
 * Enables the device, maps its register BAR(s), reads chip invariants,
 * configures DMA masks and offload features, resets any firmware-left
 * state, sets up the per-vector NAPI mailboxes, and registers the
 * netdev.  Returns 0 on success or a negative errno; on failure all
 * resources acquired so far are released via the goto ladder at the
 * bottom (labels unwind in reverse order of acquisition).
 */
15283 static int __devinit tg3_init_one(struct pci_dev *pdev,
15284                                   const struct pci_device_id *ent)
15285 {
15286         struct net_device *dev;
15287         struct tg3 *tp;
15288         int i, err, pm_cap;
15289         u32 sndmbx, rcvmbx, intmbx;
15290         char str[40];
15291         u64 dma_mask, persist_dma_mask;
15292         u32 features = 0;
15293
15294         printk_once(KERN_INFO "%s\n", version);
15295
15296         err = pci_enable_device(pdev);
15297         if (err) {
15298                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15299                 return err;
15300         }
15301
15302         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15303         if (err) {
15304                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15305                 goto err_out_disable_pdev;
15306         }
15307
15308         pci_set_master(pdev);
15309
15310         /* Find power-management capability. */
15311         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15312         if (pm_cap == 0) {
15313                 dev_err(&pdev->dev,
15314                         "Cannot find Power Management capability, aborting\n");
15315                 err = -EIO;
15316                 goto err_out_free_res;
15317         }
15318
15319         err = pci_set_power_state(pdev, PCI_D0);
15320         if (err) {
15321                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15322                 goto err_out_free_res;
15323         }
15324
              /* Allocate the netdev with room for the maximum number of
               * TX/RX queue vectors; tg3 private state lives in its priv.
               */
15325         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15326         if (!dev) {
15327                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15328                 err = -ENOMEM;
15329                 goto err_out_power_down;
15330         }
15331
15332         SET_NETDEV_DEV(dev, &pdev->dev);
15333
15334         tp = netdev_priv(dev);
15335         tp->pdev = pdev;
15336         tp->dev = dev;
15337         tp->pm_cap = pm_cap;
15338         tp->rx_mode = TG3_DEF_RX_MODE;
15339         tp->tx_mode = TG3_DEF_TX_MODE;
15340
15341         if (tg3_debug > 0)
15342                 tp->msg_enable = tg3_debug;
15343         else
15344                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15345
15346         /* The word/byte swap controls here control register access byte
15347          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15348          * setting below.
15349          */
15350         tp->misc_host_ctrl =
15351                 MISC_HOST_CTRL_MASK_PCI_INT |
15352                 MISC_HOST_CTRL_WORD_SWAP |
15353                 MISC_HOST_CTRL_INDIR_ACCESS |
15354                 MISC_HOST_CTRL_PCISTATE_RW;
15355
15356         /* The NONFRM (non-frame) byte/word swap controls take effect
15357          * on descriptor entries, anything which isn't packet data.
15358          *
15359          * The StrongARM chips on the board (one for tx, one for rx)
15360          * are running in big-endian mode.
15361          */
15362         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15363                         GRC_MODE_WSWAP_NONFRM_DATA);
15364 #ifdef __BIG_ENDIAN
15365         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15366 #endif
15367         spin_lock_init(&tp->lock);
15368         spin_lock_init(&tp->indirect_lock);
15369         INIT_WORK(&tp->reset_task, tg3_reset_task);
15370
15371         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15372         if (!tp->regs) {
15373                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15374                 err = -ENOMEM;
15375                 goto err_out_free_dev;
15376         }
15377
              /* These device IDs carry an APE (Application Processing
               * Engine); flag it and map its register window (BAR 2) too.
               */
15378         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15379             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15380             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15381             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15382             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15383             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15384             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15385             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15386                 tg3_flag_set(tp, ENABLE_APE);
15387                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15388                 if (!tp->aperegs) {
15389                         dev_err(&pdev->dev,
15390                                 "Cannot map APE registers, aborting\n");
15391                         err = -ENOMEM;
15392                         goto err_out_iounmap;
15393                 }
15394         }
15395
15396         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15397         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15398
15399         dev->ethtool_ops = &tg3_ethtool_ops;
15400         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15401         dev->netdev_ops = &tg3_netdev_ops;
15402         dev->irq = pdev->irq;
15403
15404         err = tg3_get_invariants(tp);
15405         if (err) {
15406                 dev_err(&pdev->dev,
15407                         "Problem fetching invariants of chip, aborting\n");
15408                 goto err_out_apeunmap;
15409         }
15410
15411         /* The EPB bridge inside 5714, 5715, and 5780 and any
15412          * device behind the EPB cannot support DMA addresses > 40-bit.
15413          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15414          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15415          * do DMA address check in tg3_start_xmit().
15416          */
15417         if (tg3_flag(tp, IS_5788))
15418                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15419         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15420                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15421 #ifdef CONFIG_HIGHMEM
15422                 dma_mask = DMA_BIT_MASK(64);
15423 #endif
15424         } else
15425                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15426
15427         /* Configure DMA attributes. */
15428         if (dma_mask > DMA_BIT_MASK(32)) {
15429                 err = pci_set_dma_mask(pdev, dma_mask);
15430                 if (!err) {
15431                         features |= NETIF_F_HIGHDMA;
15432                         err = pci_set_consistent_dma_mask(pdev,
15433                                                           persist_dma_mask);
15434                         if (err < 0) {
15435                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15436                                         "DMA for consistent allocations\n");
15437                                 goto err_out_apeunmap;
15438                         }
15439                 }
15440         }
              /* Fall back to a 32-bit streaming mask if the wide mask was
               * rejected (or was 32-bit to begin with).
               */
15441         if (err || dma_mask == DMA_BIT_MASK(32)) {
15442                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15443                 if (err) {
15444                         dev_err(&pdev->dev,
15445                                 "No usable DMA configuration, aborting\n");
15446                         goto err_out_apeunmap;
15447                 }
15448         }
15449
15450         tg3_init_bufmgr_config(tp);
15451
15452         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15453
15454         /* 5700 B0 chips do not support checksumming correctly due
15455          * to hardware bugs.
15456          */
15457         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15458                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15459
15460                 if (tg3_flag(tp, 5755_PLUS))
15461                         features |= NETIF_F_IPV6_CSUM;
15462         }
15463
15464         /* TSO is on by default on chips that support hardware TSO.
15465          * Firmware TSO on older chips gives lower performance, so it
15466          * is off by default, but can be enabled using ethtool.
15467          */
15468         if ((tg3_flag(tp, HW_TSO_1) ||
15469              tg3_flag(tp, HW_TSO_2) ||
15470              tg3_flag(tp, HW_TSO_3)) &&
15471             (features & NETIF_F_IP_CSUM))
15472                 features |= NETIF_F_TSO;
15473         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15474                 if (features & NETIF_F_IPV6_CSUM)
15475                         features |= NETIF_F_TSO6;
15476                 if (tg3_flag(tp, HW_TSO_3) ||
15477                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15478                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15479                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15480                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15481                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15482                         features |= NETIF_F_TSO_ECN;
15483         }
15484
15485         dev->features |= features;
15486         dev->vlan_features |= features;
15487
15488         /*
15489          * Add loopback capability only for a subset of devices that support
15490          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15491          * loopback for the remaining devices.
15492          */
15493         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15494             !tg3_flag(tp, CPMU_PRESENT))
15495                 /* Add the loopback capability */
15496                 features |= NETIF_F_LOOPBACK;
15497
15498         dev->hw_features |= features;
15499
              /* 5705 A1 without TSO on a slow bus: cap the RX ring at 63
               * pending buffers (MAX_RXPEND_64 flag).
               */
15500         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15501             !tg3_flag(tp, TSO_CAPABLE) &&
15502             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15503                 tg3_flag_set(tp, MAX_RXPEND_64);
15504                 tp->rx_pending = 63;
15505         }
15506
15507         err = tg3_get_device_address(tp);
15508         if (err) {
15509                 dev_err(&pdev->dev,
15510                         "Could not obtain valid ethernet address, aborting\n");
15511                 goto err_out_apeunmap;
15512         }
15513
15514         /*
15515          * Reset chip in case UNDI or EFI driver did not shutdown
15516          * DMA self test will enable WDMAC and we'll see (spurious)
15517          * pending DMA on the PCI bus at that point.
15518          */
15519         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15520             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15521                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15522                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15523         }
15524
15525         err = tg3_test_dma(tp);
15526         if (err) {
15527                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15528                 goto err_out_apeunmap;
15529         }
15530
              /* Assign the interrupt / rx-consumer / tx-producer mailbox
               * register offsets to each NAPI context.
               */
15531         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15532         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15533         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15534         for (i = 0; i < tp->irq_max; i++) {
15535                 struct tg3_napi *tnapi = &tp->napi[i];
15536
15537                 tnapi->tp = tp;
15538                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15539
15540                 tnapi->int_mbox = intmbx;
15541                 if (i <= 4)
15542                         intmbx += 0x8;
15543                 else
15544                         intmbx += 0x4;
15545
15546                 tnapi->consmbox = rcvmbx;
15547                 tnapi->prodmbox = sndmbx;
15548
15549                 if (i)
15550                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15551                 else
15552                         tnapi->coal_now = HOSTCC_MODE_NOW;
15553
15554                 if (!tg3_flag(tp, SUPPORT_MSIX))
15555                         break;
15556
15557                 /*
15558                  * If we support MSIX, we'll be using RSS.  If we're using
15559                  * RSS, the first vector only handles link interrupts and the
15560                  * remaining vectors handle rx and tx interrupts.  Reuse the
15561                  * mailbox values for the next iteration.  The values we setup
15562                  * above are still useful for the single vectored mode.
15563                  */
15564                 if (!i)
15565                         continue;
15566
15567                 rcvmbx += 0x8;
15568
15569                 if (sndmbx & 0x4)
15570                         sndmbx -= 0x4;
15571                 else
15572                         sndmbx += 0xc;
15573         }
15574
15575         tg3_init_coal(tp);
15576
15577         pci_set_drvdata(pdev, dev);
15578
15579         if (tg3_flag(tp, 5717_PLUS)) {
15580                 /* Resume a low-power mode */
15581                 tg3_frob_aux_power(tp, false);
15582         }
15583
15584         err = register_netdev(dev);
15585         if (err) {
15586                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15587                 goto err_out_apeunmap;
15588         }
15589
              /* Probe succeeded: log board, PHY, and DMA configuration. */
15590         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15591                     tp->board_part_number,
15592                     tp->pci_chip_rev_id,
15593                     tg3_bus_string(tp, str),
15594                     dev->dev_addr);
15595
15596         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15597                 struct phy_device *phydev;
15598                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15599                 netdev_info(dev,
15600                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15601                             phydev->drv->name, dev_name(&phydev->dev));
15602         } else {
15603                 char *ethtype;
15604
15605                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15606                         ethtype = "10/100Base-TX";
15607                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15608                         ethtype = "1000Base-SX";
15609                 else
15610                         ethtype = "10/100/1000Base-T";
15611
15612                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15613                             "(WireSpeed[%d], EEE[%d])\n",
15614                             tg3_phy_string(tp), ethtype,
15615                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15616                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15617         }
15618
15619         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15620                     (dev->features & NETIF_F_RXCSUM) != 0,
15621                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15622                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15623                     tg3_flag(tp, ENABLE_ASF) != 0,
15624                     tg3_flag(tp, TSO_CAPABLE) != 0);
15625         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15626                     tp->dma_rwctrl,
15627                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15628                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15629
15630         pci_save_state(pdev);
15631
15632         return 0;
15633
/* Error unwind: each label releases what was acquired before the
 * corresponding failure point, in reverse order of acquisition.
 */
15634 err_out_apeunmap:
15635         if (tp->aperegs) {
15636                 iounmap(tp->aperegs);
15637                 tp->aperegs = NULL;
15638         }
15639
15640 err_out_iounmap:
15641         if (tp->regs) {
15642                 iounmap(tp->regs);
15643                 tp->regs = NULL;
15644         }
15645
15646 err_out_free_dev:
15647         free_netdev(dev);
15648
15649 err_out_power_down:
15650         pci_set_power_state(pdev, PCI_D3hot);
15651
15652 err_out_free_res:
15653         pci_release_regions(pdev);
15654
15655 err_out_disable_pdev:
15656         pci_disable_device(pdev);
15657         pci_set_drvdata(pdev, NULL);
15658         return err;
15659 }
15660
/* PCI remove callback: tear down everything tg3_init_one() set up —
 * firmware reference, pending reset work, PHY/MDIO state, the netdev
 * registration, the BAR mappings, and the PCI device itself.
 */
15661 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15662 {
15663         struct net_device *dev = pci_get_drvdata(pdev);
15664
15665         if (dev) {
15666                 struct tg3 *tp = netdev_priv(dev);
15667
15668                 if (tp->fw)
15669                         release_firmware(tp->fw);
15670
                      /* Ensure the deferred reset task is not running while
                       * we tear things down.
                       */
15671                 cancel_work_sync(&tp->reset_task);
15672
15673                 if (!tg3_flag(tp, USE_PHYLIB)) {
15674                         tg3_phy_fini(tp);
15675                         tg3_mdio_fini(tp);
15676                 }
15677
15678                 unregister_netdev(dev);
15679                 if (tp->aperegs) {
15680                         iounmap(tp->aperegs);
15681                         tp->aperegs = NULL;
15682                 }
15683                 if (tp->regs) {
15684                         iounmap(tp->regs);
15685                         tp->regs = NULL;
15686                 }
15687                 free_netdev(dev);
15688                 pci_release_regions(pdev);
15689                 pci_disable_device(pdev);
15690                 pci_set_drvdata(pdev, NULL);
15691         }
15692 }
15693
15694 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend callback (dev_pm_ops.suspend).  Quiesces the
 * interface (reset task, PHY, NAPI, timer, interrupts), halts the chip
 * and prepares it for power-down.  If the power-down preparation fails,
 * the hardware is restarted so the device remains usable, and the
 * original error is still returned to the PM core.
 */
15695 static int tg3_suspend(struct device *device)
15696 {
15697         struct pci_dev *pdev = to_pci_dev(device);
15698         struct net_device *dev = pci_get_drvdata(pdev);
15699         struct tg3 *tp = netdev_priv(dev);
15700         int err;
15701
              /* Nothing to quiesce if the interface is down. */
15702         if (!netif_running(dev))
15703                 return 0;
15704
15705         flush_work_sync(&tp->reset_task);
15706         tg3_phy_stop(tp);
15707         tg3_netif_stop(tp);
15708
15709         del_timer_sync(&tp->timer);
15710
15711         tg3_full_lock(tp, 1);
15712         tg3_disable_ints(tp);
15713         tg3_full_unlock(tp);
15714
15715         netif_device_detach(dev);
15716
15717         tg3_full_lock(tp, 0);
15718         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15719         tg3_flag_clear(tp, INIT_COMPLETE);
15720         tg3_full_unlock(tp);
15721
15722         err = tg3_power_down_prepare(tp);
15723         if (err) {
                      /* Power-down prep failed: bring the hardware and the
                       * netif/timer/PHY machinery back up before returning
                       * the error.
                       */
15724                 int err2;
15725
15726                 tg3_full_lock(tp, 0);
15727
15728                 tg3_flag_set(tp, INIT_COMPLETE);
15729                 err2 = tg3_restart_hw(tp, 1);
15730                 if (err2)
15731                         goto out;
15732
15733                 tp->timer.expires = jiffies + tp->timer_offset;
15734                 add_timer(&tp->timer);
15735
15736                 netif_device_attach(dev);
15737                 tg3_netif_start(tp);
15738
15739 out:
15740                 tg3_full_unlock(tp);
15741
15742                 if (!err2)
15743                         tg3_phy_start(tp);
15744         }
15745
15746         return err;
15747 }
15748
/* System-sleep resume callback (dev_pm_ops.resume).  Reverses
 * tg3_suspend(): reattaches the netdev, restarts the hardware, rearms
 * the periodic timer, and restarts NAPI and the PHY.  No-op if the
 * interface was down at suspend time.
 */
15749 static int tg3_resume(struct device *device)
15750 {
15751         struct pci_dev *pdev = to_pci_dev(device);
15752         struct net_device *dev = pci_get_drvdata(pdev);
15753         struct tg3 *tp = netdev_priv(dev);
15754         int err;
15755
15756         if (!netif_running(dev))
15757                 return 0;
15758
15759         netif_device_attach(dev);
15760
15761         tg3_full_lock(tp, 0);
15762
15763         tg3_flag_set(tp, INIT_COMPLETE);
15764         err = tg3_restart_hw(tp, 1);
15765         if (err)
15766                 goto out;
15767
15768         tp->timer.expires = jiffies + tp->timer_offset;
15769         add_timer(&tp->timer);
15770
15771         tg3_netif_start(tp);
15772
15773 out:
15774         tg3_full_unlock(tp);
15775
              /* PHY restart must happen outside the full lock. */
15776         if (!err)
15777                 tg3_phy_start(tp);
15778
15779         return err;
15780 }
15781
/* With CONFIG_PM_SLEEP, expose the suspend/resume pair through a
 * dev_pm_ops; otherwise the driver registers no PM callbacks
 * (TG3_PM_OPS is wired into tg3_driver.driver.pm below).
 */
15782 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15783 #define TG3_PM_OPS (&tg3_pm_ops)
15784
15785 #else
15786
15787 #define TG3_PM_OPS NULL
15788
15789 #endif /* CONFIG_PM_SLEEP */
15790
15791 /**
15792  * tg3_io_error_detected - called when PCI error is detected
15793  * @pdev: Pointer to PCI device
15794  * @state: The current pci connection state
15795  *
15796  * This function is called after a PCI bus error affecting
15797  * this device has been detected.
15798  */
15799 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15800                                               pci_channel_state_t state)
15801 {
15802         struct net_device *netdev = pci_get_drvdata(pdev);
15803         struct tg3 *tp = netdev_priv(netdev);
              /* Default verdict: ask the PCI core for a slot reset. */
15804         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15805
15806         netdev_info(netdev, "PCI I/O error detected\n");
15807
15808         rtnl_lock();
15809
15810         if (!netif_running(netdev))
15811                 goto done;
15812
15813         tg3_phy_stop(tp);
15814
15815         tg3_netif_stop(tp);
15816
15817         del_timer_sync(&tp->timer);
15818         tg3_flag_clear(tp, RESTART_TIMER);
15819
15820         /* Want to make sure that the reset task doesn't run */
15821         cancel_work_sync(&tp->reset_task);
              /* Clear again in case the reset task re-set the flags before
               * it was cancelled.
               */
15822         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15823         tg3_flag_clear(tp, RESTART_TIMER);
15824
15825         netif_device_detach(netdev);
15826
15827         /* Clean up software state, even if MMIO is blocked */
15828         tg3_full_lock(tp, 0);
15829         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15830         tg3_full_unlock(tp);
15831
15832 done:
              /* Permanent failure: tell the core to disconnect; otherwise
               * disable the device pending the slot reset.
               */
15833         if (state == pci_channel_io_perm_failure)
15834                 err = PCI_ERS_RESULT_DISCONNECT;
15835         else
15836                 pci_disable_device(pdev);
15837
15838         rtnl_unlock();
15839
15840         return err;
15841 }
15842
15843 /**
15844  * tg3_io_slot_reset - called after the pci bus has been reset.
15845  * @pdev: Pointer to PCI device
15846  *
15847  * Restart the card from scratch, as if from a cold-boot.
15848  * At this point, the card has experienced a hard reset,
15849  * followed by fixups by BIOS, and has its config space
15850  * set up identically to what it was at cold boot.
15851  */
15852 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15853 {
15854         struct net_device *netdev = pci_get_drvdata(pdev);
15855         struct tg3 *tp = netdev_priv(netdev);
              /* Pessimistic default: any early failure means disconnect. */
15856         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15857         int err;
15858
15859         rtnl_lock();
15860
15861         if (pci_enable_device(pdev)) {
15862                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15863                 goto done;
15864         }
15865
              /* Restore the config space saved at probe time, then re-save
               * it so a future error recovery starts from this state.
               */
15866         pci_set_master(pdev);
15867         pci_restore_state(pdev);
15868         pci_save_state(pdev);
15869
15870         if (!netif_running(netdev)) {
15871                 rc = PCI_ERS_RESULT_RECOVERED;
15872                 goto done;
15873         }
15874
15875         err = tg3_power_up(tp);
15876         if (err)
15877                 goto done;
15878
15879         rc = PCI_ERS_RESULT_RECOVERED;
15880
15881 done:
15882         rtnl_unlock();
15883
15884         return rc;
15885 }
15886
15887 /**
15888  * tg3_io_resume - called when traffic can start flowing again.
15889  * @pdev: Pointer to PCI device
15890  *
15891  * This callback is called when the error recovery driver tells
15892  * us that it's OK to resume normal operation.
15893  */
15894 static void tg3_io_resume(struct pci_dev *pdev)
15895 {
15896         struct net_device *netdev = pci_get_drvdata(pdev);
15897         struct tg3 *tp = netdev_priv(netdev);
15898         int err;
15899
15900         rtnl_lock();
15901
15902         if (!netif_running(netdev))
15903                 goto done;
15904
              /* Reinitialize the hardware; on failure the device stays
               * detached and we simply return (void callback, no rc).
               */
15905         tg3_full_lock(tp, 0);
15906         tg3_flag_set(tp, INIT_COMPLETE);
15907         err = tg3_restart_hw(tp, 1);
15908         tg3_full_unlock(tp);
15909         if (err) {
15910                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15911                 goto done;
15912         }
15913
15914         netif_device_attach(netdev);
15915
15916         tp->timer.expires = jiffies + tp->timer_offset;
15917         add_timer(&tp->timer);
15918
15919         tg3_netif_start(tp);
15920
15921         tg3_phy_start(tp);
15922
15923 done:
15924         rtnl_unlock();
15925 }
15926
/* PCI AER/EEH error-recovery callbacks, referenced by tg3_driver. */
15927 static struct pci_error_handlers tg3_err_handler = {
15928         .error_detected = tg3_io_error_detected,
15929         .slot_reset     = tg3_io_slot_reset,
15930         .resume         = tg3_io_resume
15931 };
15932
/* Top-level PCI driver descriptor tying together the probe/remove
 * callbacks, the device ID table, error recovery, and (when built with
 * CONFIG_PM_SLEEP) the suspend/resume PM ops.
 */
15933 static struct pci_driver tg3_driver = {
15934         .name           = DRV_MODULE_NAME,
15935         .id_table       = tg3_pci_tbl,
15936         .probe          = tg3_init_one,
15937         .remove         = __devexit_p(tg3_remove_one),
15938         .err_handler    = &tg3_err_handler,
15939         .driver.pm      = TG3_PM_OPS,
15940 };
15941
/* Module entry point: register the tg3 PCI driver with the PCI core. */
15942 static int __init tg3_init(void)
15943 {
15944         return pci_register_driver(&tg3_driver);
15945 }
15946
/* Module exit point: unregister the driver (triggers tg3_remove_one
 * for every bound device).
 */
15947 static void __exit tg3_cleanup(void)
15948 {
15949         pci_unregister_driver(&tg3_driver);
15950 }
15951
/* Hook the init/exit functions into the module loader. */
15952 module_init(tg3_init);
15953 module_exit(tg3_cleanup);