/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     123
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "March 21, 2012"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring-entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo and
 * similar operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
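/* Example: NEXT_TX(511) == ((511 + 1) & 511) == 0, so the index wraps
 * back to the start of the ring with a mask instead of a divide.
 */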

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
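/* e.g. TG3_RX_STD_MAP_SZ == 1536 + 64 == 1600 bytes are mapped per
 * standard RX buffer.
 */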

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
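/* e.g. with the default tx_pending of 511, this evaluates to 127 free
 * descriptors.
 */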
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}
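/* The indirect mailbox helpers above and below reach the mailboxes at
 * their register offset plus 0x5600 within the indirect register window,
 * matching the TG3PCI_REG_BASE_ADDR writes above.
 */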

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example, when the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another, when the clock frequencies are changed.
 */
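/* For example, tw32_wait_f(GRC_LOCAL_CTRL, val, TG3_GRC_LCLCTL_PWRSW_DELAY)
 * guarantees that at least 100 usec pass after the write, whichever of the
 * posted or non-posted paths below is taken.
 */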
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
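/* All register and mailbox traffic funnels through the tp->read32/
 * tp->write32 function-pointer hooks, so the driver can install the
 * direct, flush-after-write, or indirect config-space accessors defined
 * above to suit the bugs of a given chip revision.
 */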

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
                /* else: fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
                /* else: fall through */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
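/* With the udelay(10) per polling pass below, this bounds a single MDIO
 * transaction to roughly 50 msec before giving up with -EBUSY.
 */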

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

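/* The two helpers below implement indirect Clause 45 MMD access through
 * the Clause 22 MMD control/address register pair: select the MMD device,
 * latch the register address, then transfer the data in no-increment mode.
 */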
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)
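/* The two helpers above differ only in MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
 * both keep MII_TG3_AUXCTL_ACTL_TX_6DB set.
 */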

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
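        /* Poll in 8 usec slices: (delay_cnt >> 3) + 1 passes of udelay(8)
         * cover the remaining window with at most one extra slice.
         */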
1446         delay_cnt = (delay_cnt >> 3) + 1;
1447
1448         for (i = 0; i < delay_cnt; i++) {
1449                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1450                         break;
1451                 udelay(8);
1452         }
1453 }
1454
1455 /* tp->lock is held. */
1456 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1457 {
1458         u32 reg, val;
1459
1460         val = 0;
1461         if (!tg3_readphy(tp, MII_BMCR, &reg))
1462                 val = reg << 16;
1463         if (!tg3_readphy(tp, MII_BMSR, &reg))
1464                 val |= (reg & 0xffff);
1465         *data++ = val;
1466
1467         val = 0;
1468         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1469                 val = reg << 16;
1470         if (!tg3_readphy(tp, MII_LPA, &reg))
1471                 val |= (reg & 0xffff);
1472         *data++ = val;
1473
1474         val = 0;
1475         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1476                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1477                         val = reg << 16;
1478                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1479                         val |= (reg & 0xffff);
1480         }
1481         *data++ = val;
1482
1483         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1484                 val = reg << 16;
1485         else
1486                 val = 0;
1487         *data++ = val;
1488 }
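
/*
 * Editor's note: each u32 gathered above packs one MII register pair,
 * high half first -- data[0] = BMCR:BMSR, data[1] = ADVERTISE:LPA,
 * data[2] = CTRL1000:STAT1000 (copper only), data[3] = PHYADDR:0.
 * A minimal sketch of that packing; the helper below is hypothetical
 * and not part of the driver:
 */
#if 0
static u32 tg3_pack_mii_pair(u16 hi_reg, u16 lo_reg)
{
        return ((u32)hi_reg << 16) | lo_reg;    /* e.g. BMCR in bits 31:16 */
}
#endif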
1489
1490 /* tp->lock is held. */
1491 static void tg3_ump_link_report(struct tg3 *tp)
1492 {
1493         u32 data[4];
1494
1495         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1496                 return;
1497
1498         tg3_phy_gather_ump_data(tp, data);
1499
1500         tg3_wait_for_event_ack(tp);
1501
1502         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1503         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1504         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1505         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1506         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1507         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1508
1509         tg3_generate_fw_event(tp);
1510 }
1511
1512 /* tp->lock is held. */
1513 static void tg3_stop_fw(struct tg3 *tp)
1514 {
1515         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1516                 /* Wait for RX cpu to ACK the previous event. */
1517                 tg3_wait_for_event_ack(tp);
1518
1519                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1520
1521                 tg3_generate_fw_event(tp);
1522
1523                 /* Wait for RX cpu to ACK this event. */
1524                 tg3_wait_for_event_ack(tp);
1525         }
1526 }
1527
1528 /* tp->lock is held. */
1529 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1530 {
1531         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1532                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1533
1534         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1535                 switch (kind) {
1536                 case RESET_KIND_INIT:
1537                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1538                                       DRV_STATE_START);
1539                         break;
1540
1541                 case RESET_KIND_SHUTDOWN:
1542                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1543                                       DRV_STATE_UNLOAD);
1544                         break;
1545
1546                 case RESET_KIND_SUSPEND:
1547                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1548                                       DRV_STATE_SUSPEND);
1549                         break;
1550
1551                 default:
1552                         break;
1553                 }
1554         }
1555
1556         if (kind == RESET_KIND_INIT ||
1557             kind == RESET_KIND_SUSPEND)
1558                 tg3_ape_driver_state_change(tp, kind);
1559 }
1560
1561 /* tp->lock is held. */
1562 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1563 {
1564         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1565                 switch (kind) {
1566                 case RESET_KIND_INIT:
1567                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1568                                       DRV_STATE_START_DONE);
1569                         break;
1570
1571                 case RESET_KIND_SHUTDOWN:
1572                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1573                                       DRV_STATE_UNLOAD_DONE);
1574                         break;
1575
1576                 default:
1577                         break;
1578                 }
1579         }
1580
1581         if (kind == RESET_KIND_SHUTDOWN)
1582                 tg3_ape_driver_state_change(tp, kind);
1583 }
1584
1585 /* tp->lock is held. */
1586 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1587 {
1588         if (tg3_flag(tp, ENABLE_ASF)) {
1589                 switch (kind) {
1590                 case RESET_KIND_INIT:
1591                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1592                                       DRV_STATE_START);
1593                         break;
1594
1595                 case RESET_KIND_SHUTDOWN:
1596                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1597                                       DRV_STATE_UNLOAD);
1598                         break;
1599
1600                 case RESET_KIND_SUSPEND:
1601                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1602                                       DRV_STATE_SUSPEND);
1603                         break;
1604
1605                 default:
1606                         break;
1607                 }
1608         }
1609 }
1610
1611 static int tg3_poll_fw(struct tg3 *tp)
1612 {
1613         int i;
1614         u32 val;
1615
1616         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1617                 /* Wait up to 20ms for init done. */
1618                 for (i = 0; i < 200; i++) {
1619                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1620                                 return 0;
1621                         udelay(100);
1622                 }
1623                 return -ENODEV;
1624         }
1625
1626         /* Wait for firmware initialization to complete. */
1627         for (i = 0; i < 100000; i++) {
1628                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1629                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1630                         break;
1631                 udelay(10);
1632         }
1633
1634         /* Chip might not be fitted with firmware.  Some Sun onboard
1635          * parts are configured like that.  So don't signal the timeout
1636          * of the above loop as an error, but do report the lack of
1637          * running firmware once.
1638          */
1639         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1640                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1641
1642                 netdev_info(tp->dev, "No firmware running\n");
1643         }
1644
1645         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1646                 /* The 57765 A0 needs a little more
1647                  * time to do some important work.
1648                  */
1649                 mdelay(10);
1650         }
1651
1652         return 0;
1653 }
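
/*
 * Editor's note -- the poll budgets above, worked out: the 5906 path
 * spins 200 x udelay(100) = 20 ms, matching its comment; the generic
 * path spins up to 100000 x udelay(10) = 1 s waiting for the firmware
 * mailbox to flip to ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1.
 */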
1654
1655 static void tg3_link_report(struct tg3 *tp)
1656 {
1657         if (!netif_carrier_ok(tp->dev)) {
1658                 netif_info(tp, link, tp->dev, "Link is down\n");
1659                 tg3_ump_link_report(tp);
1660         } else if (netif_msg_link(tp)) {
1661                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1662                             (tp->link_config.active_speed == SPEED_1000 ?
1663                              1000 :
1664                              (tp->link_config.active_speed == SPEED_100 ?
1665                               100 : 10)),
1666                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1667                              "full" : "half"));
1668
1669                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1670                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1671                             "on" : "off",
1672                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1673                             "on" : "off");
1674
1675                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1676                         netdev_info(tp->dev, "EEE is %s\n",
1677                                     tp->setlpicnt ? "enabled" : "disabled");
1678
1679                 tg3_ump_link_report(tp);
1680         }
1681 }
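
/*
 * Editor's note: for a 1000 Mbps full-duplex link with both pause
 * directions resolved on, the format strings above yield (modulo the
 * netdev_info device prefix):
 *   Link is up at 1000 Mbps, full duplex
 *   Flow control is on for TX and on for RX
 */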
1682
1683 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1684 {
1685         u16 miireg;
1686
1687         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1688                 miireg = ADVERTISE_1000XPAUSE;
1689         else if (flow_ctrl & FLOW_CTRL_TX)
1690                 miireg = ADVERTISE_1000XPSE_ASYM;
1691         else if (flow_ctrl & FLOW_CTRL_RX)
1692                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1693         else
1694                 miireg = 0;
1695
1696         return miireg;
1697 }
1698
1699 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1700 {
1701         u8 cap = 0;
1702
1703         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1704                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1705         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1706                 if (lcladv & ADVERTISE_1000XPAUSE)
1707                         cap = FLOW_CTRL_RX;
1708                 if (rmtadv & ADVERTISE_1000XPAUSE)
1709                         cap = FLOW_CTRL_TX;
1710         }
1711
1712         return cap;
1713 }
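
/*
 * Editor's note -- the 1000BASE-X pause resolution implied by the two
 * helpers above: if both sides advertise PAUSE, flow control is enabled
 * in both directions; otherwise, if both advertise ASYM_PAUSE, the side
 * that also advertises PAUSE receives pause frames (local PAUSE =>
 * FLOW_CTRL_RX, remote PAUSE => FLOW_CTRL_TX); any other combination
 * resolves to no flow control.
 */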
1714
1715 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1716 {
1717         u8 autoneg;
1718         u8 flowctrl = 0;
1719         u32 old_rx_mode = tp->rx_mode;
1720         u32 old_tx_mode = tp->tx_mode;
1721
1722         if (tg3_flag(tp, USE_PHYLIB))
1723                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1724         else
1725                 autoneg = tp->link_config.autoneg;
1726
1727         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1728                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1729                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1730                 else
1731                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1732         } else
1733                 flowctrl = tp->link_config.flowctrl;
1734
1735         tp->link_config.active_flowctrl = flowctrl;
1736
1737         if (flowctrl & FLOW_CTRL_RX)
1738                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1739         else
1740                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1741
1742         if (old_rx_mode != tp->rx_mode)
1743                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1744
1745         if (flowctrl & FLOW_CTRL_TX)
1746                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1747         else
1748                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1749
1750         if (old_tx_mode != tp->tx_mode)
1751                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1752 }
1753
1754 static void tg3_adjust_link(struct net_device *dev)
1755 {
1756         u8 oldflowctrl, linkmesg = 0;
1757         u32 mac_mode, lcl_adv, rmt_adv;
1758         struct tg3 *tp = netdev_priv(dev);
1759         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1760
1761         spin_lock_bh(&tp->lock);
1762
1763         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1764                                     MAC_MODE_HALF_DUPLEX);
1765
1766         oldflowctrl = tp->link_config.active_flowctrl;
1767
1768         if (phydev->link) {
1769                 lcl_adv = 0;
1770                 rmt_adv = 0;
1771
1772                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1773                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1774                 else if (phydev->speed == SPEED_1000 ||
1775                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1776                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1777                 else
1778                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1779
1780                 if (phydev->duplex == DUPLEX_HALF)
1781                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1782                 else {
1783                         lcl_adv = mii_advertise_flowctrl(
1784                                   tp->link_config.flowctrl);
1785
1786                         if (phydev->pause)
1787                                 rmt_adv = LPA_PAUSE_CAP;
1788                         if (phydev->asym_pause)
1789                                 rmt_adv |= LPA_PAUSE_ASYM;
1790                 }
1791
1792                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1793         } else
1794                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1795
1796         if (mac_mode != tp->mac_mode) {
1797                 tp->mac_mode = mac_mode;
1798                 tw32_f(MAC_MODE, tp->mac_mode);
1799                 udelay(40);
1800         }
1801
1802         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1803                 if (phydev->speed == SPEED_10)
1804                         tw32(MAC_MI_STAT,
1805                              MAC_MI_STAT_10MBPS_MODE |
1806                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1807                 else
1808                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1809         }
1810
1811         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1812                 tw32(MAC_TX_LENGTHS,
1813                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1814                       (6 << TX_LENGTHS_IPG_SHIFT) |
1815                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1816         else
1817                 tw32(MAC_TX_LENGTHS,
1818                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1819                       (6 << TX_LENGTHS_IPG_SHIFT) |
1820                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1821
1822         if (phydev->link != tp->old_link ||
1823             phydev->speed != tp->link_config.active_speed ||
1824             phydev->duplex != tp->link_config.active_duplex ||
1825             oldflowctrl != tp->link_config.active_flowctrl)
1826                 linkmesg = 1;
1827
1828         tp->old_link = phydev->link;
1829         tp->link_config.active_speed = phydev->speed;
1830         tp->link_config.active_duplex = phydev->duplex;
1831
1832         spin_unlock_bh(&tp->lock);
1833
1834         if (linkmesg)
1835                 tg3_link_report(tp);
1836 }
1837
1838 static int tg3_phy_init(struct tg3 *tp)
1839 {
1840         struct phy_device *phydev;
1841
1842         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1843                 return 0;
1844
1845         /* Bring the PHY back to a known state. */
1846         tg3_bmcr_reset(tp);
1847
1848         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1849
1850         /* Attach the MAC to the PHY. */
1851         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1852                              phydev->dev_flags, phydev->interface);
1853         if (IS_ERR(phydev)) {
1854                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1855                 return PTR_ERR(phydev);
1856         }
1857
1858         /* Mask with MAC supported features. */
1859         switch (phydev->interface) {
1860         case PHY_INTERFACE_MODE_GMII:
1861         case PHY_INTERFACE_MODE_RGMII:
1862                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1863                         phydev->supported &= (PHY_GBIT_FEATURES |
1864                                               SUPPORTED_Pause |
1865                                               SUPPORTED_Asym_Pause);
1866                         break;
1867                 }
1868                 /* fallthru */
1869         case PHY_INTERFACE_MODE_MII:
1870                 phydev->supported &= (PHY_BASIC_FEATURES |
1871                                       SUPPORTED_Pause |
1872                                       SUPPORTED_Asym_Pause);
1873                 break;
1874         default:
1875                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1876                 return -EINVAL;
1877         }
1878
1879         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1880
1881         phydev->advertising = phydev->supported;
1882
1883         return 0;
1884 }
1885
1886 static void tg3_phy_start(struct tg3 *tp)
1887 {
1888         struct phy_device *phydev;
1889
1890         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1891                 return;
1892
1893         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1894
1895         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1896                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1897                 phydev->speed = tp->link_config.speed;
1898                 phydev->duplex = tp->link_config.duplex;
1899                 phydev->autoneg = tp->link_config.autoneg;
1900                 phydev->advertising = tp->link_config.advertising;
1901         }
1902
1903         phy_start(phydev);
1904
1905         phy_start_aneg(phydev);
1906 }
1907
1908 static void tg3_phy_stop(struct tg3 *tp)
1909 {
1910         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1911                 return;
1912
1913         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1914 }
1915
1916 static void tg3_phy_fini(struct tg3 *tp)
1917 {
1918         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1919                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1920                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1921         }
1922 }
1923
1924 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1925 {
1926         int err;
1927         u32 val;
1928
1929         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1930                 return 0;
1931
1932         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1933                 /* Cannot do read-modify-write on 5401 */
1934                 err = tg3_phy_auxctl_write(tp,
1935                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1936                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1937                                            0x4c20);
1938                 goto done;
1939         }
1940
1941         err = tg3_phy_auxctl_read(tp,
1942                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1943         if (err)
1944                 return err;
1945
1946         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1947         err = tg3_phy_auxctl_write(tp,
1948                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1949
1950 done:
1951         return err;
1952 }
1953
1954 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1955 {
1956         u32 phytest;
1957
1958         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1959                 u32 phy;
1960
1961                 tg3_writephy(tp, MII_TG3_FET_TEST,
1962                              phytest | MII_TG3_FET_SHADOW_EN);
1963                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1964                         if (enable)
1965                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1966                         else
1967                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1968                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1969                 }
1970                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1971         }
1972 }
1973
1974 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1975 {
1976         u32 reg;
1977
1978         if (!tg3_flag(tp, 5705_PLUS) ||
1979             (tg3_flag(tp, 5717_PLUS) &&
1980              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1981                 return;
1982
1983         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1984                 tg3_phy_fet_toggle_apd(tp, enable);
1985                 return;
1986         }
1987
1988         reg = MII_TG3_MISC_SHDW_WREN |
1989               MII_TG3_MISC_SHDW_SCR5_SEL |
1990               MII_TG3_MISC_SHDW_SCR5_LPED |
1991               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1992               MII_TG3_MISC_SHDW_SCR5_SDTL |
1993               MII_TG3_MISC_SHDW_SCR5_C125OE;
1994         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1995                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1996
1997         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1998
1999
2000         reg = MII_TG3_MISC_SHDW_WREN |
2001               MII_TG3_MISC_SHDW_APD_SEL |
2002               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2003         if (enable)
2004                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2005
2006         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2007 }
2008
2009 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2010 {
2011         u32 phy;
2012
2013         if (!tg3_flag(tp, 5705_PLUS) ||
2014             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2015                 return;
2016
2017         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2018                 u32 ephy;
2019
2020                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2021                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2022
2023                         tg3_writephy(tp, MII_TG3_FET_TEST,
2024                                      ephy | MII_TG3_FET_SHADOW_EN);
2025                         if (!tg3_readphy(tp, reg, &phy)) {
2026                                 if (enable)
2027                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2028                                 else
2029                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2030                                 tg3_writephy(tp, reg, phy);
2031                         }
2032                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2033                 }
2034         } else {
2035                 int ret;
2036
2037                 ret = tg3_phy_auxctl_read(tp,
2038                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2039                 if (!ret) {
2040                         if (enable)
2041                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2042                         else
2043                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2044                         tg3_phy_auxctl_write(tp,
2045                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2046                 }
2047         }
2048 }
2049
2050 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2051 {
2052         int ret;
2053         u32 val;
2054
2055         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2056                 return;
2057
2058         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2059         if (!ret)
2060                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2061                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2062 }
2063
2064 static void tg3_phy_apply_otp(struct tg3 *tp)
2065 {
2066         u32 otp, phy;
2067
2068         if (!tp->phy_otp)
2069                 return;
2070
2071         otp = tp->phy_otp;
2072
2073         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2074                 return;
2075
2076         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2077         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2078         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2079
2080         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2081               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2082         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2083
2084         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2085         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2086         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2087
2088         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2089         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2090
2091         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2092         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2093
2094         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2095               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2096         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2097
2098         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2099 }
2100
2101 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2102 {
2103         u32 val;
2104
2105         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2106                 return;
2107
2108         tp->setlpicnt = 0;
2109
2110         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2111             current_link_up == 1 &&
2112             tp->link_config.active_duplex == DUPLEX_FULL &&
2113             (tp->link_config.active_speed == SPEED_100 ||
2114              tp->link_config.active_speed == SPEED_1000)) {
2115                 u32 eeectl;
2116
2117                 if (tp->link_config.active_speed == SPEED_1000)
2118                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2119                 else
2120                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2121
2122                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2123
2124                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2125                                   TG3_CL45_D7_EEERES_STAT, &val);
2126
2127                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2128                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2129                         tp->setlpicnt = 2;
2130         }
2131
2132         if (!tp->setlpicnt) {
2133                 if (current_link_up == 1 &&
2134                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2135                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2136                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2137                 }
2138
2139                 val = tr32(TG3_CPMU_EEE_MODE);
2140                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2141         }
2142 }
2143
2144 static void tg3_phy_eee_enable(struct tg3 *tp)
2145 {
2146         u32 val;
2147
2148         if (tp->link_config.active_speed == SPEED_1000 &&
2149             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2150              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2151              tg3_flag(tp, 57765_CLASS)) &&
2152             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2153                 val = MII_TG3_DSP_TAP26_ALNOKO |
2154                       MII_TG3_DSP_TAP26_RMRXSTO;
2155                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2156                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2157         }
2158
2159         val = tr32(TG3_CPMU_EEE_MODE);
2160         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2161 }
2162
2163 static int tg3_wait_macro_done(struct tg3 *tp)
2164 {
2165         int limit = 100;
2166
2167         while (limit--) {
2168                 u32 tmp32;
2169
2170                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2171                         if ((tmp32 & 0x1000) == 0)
2172                                 break;
2173                 }
2174         }
2175         if (limit < 0)
2176                 return -EBUSY;
2177
2178         return 0;
2179 }
2180
2181 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2182 {
2183         static const u32 test_pat[4][6] = {
2184         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2185         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2186         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2187         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2188         };
2189         int chan;
2190
2191         for (chan = 0; chan < 4; chan++) {
2192                 int i;
2193
2194                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2195                              (chan * 0x2000) | 0x0200);
2196                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2197
2198                 for (i = 0; i < 6; i++)
2199                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2200                                      test_pat[chan][i]);
2201
2202                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2203                 if (tg3_wait_macro_done(tp)) {
2204                         *resetp = 1;
2205                         return -EBUSY;
2206                 }
2207
2208                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2209                              (chan * 0x2000) | 0x0200);
2210                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2211                 if (tg3_wait_macro_done(tp)) {
2212                         *resetp = 1;
2213                         return -EBUSY;
2214                 }
2215
2216                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2217                 if (tg3_wait_macro_done(tp)) {
2218                         *resetp = 1;
2219                         return -EBUSY;
2220                 }
2221
2222                 for (i = 0; i < 6; i += 2) {
2223                         u32 low, high;
2224
2225                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2226                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2227                             tg3_wait_macro_done(tp)) {
2228                                 *resetp = 1;
2229                                 return -EBUSY;
2230                         }
2231                         low &= 0x7fff;
2232                         high &= 0x000f;
2233                         if (low != test_pat[chan][i] ||
2234                             high != test_pat[chan][i+1]) {
2235                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2236                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2237                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2238
2239                                 return -EBUSY;
2240                         }
2241                 }
2242         }
2243
2244         return 0;
2245 }
2246
2247 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2248 {
2249         int chan;
2250
2251         for (chan = 0; chan < 4; chan++) {
2252                 int i;
2253
2254                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2255                              (chan * 0x2000) | 0x0200);
2256                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2257                 for (i = 0; i < 6; i++)
2258                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2259                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2260                 if (tg3_wait_macro_done(tp))
2261                         return -EBUSY;
2262         }
2263
2264         return 0;
2265 }
2266
2267 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2268 {
2269         u32 reg32, phy9_orig;
2270         int retries, do_phy_reset, err;
2271
2272         retries = 10;
2273         do_phy_reset = 1;
2274         do {
2275                 if (do_phy_reset) {
2276                         err = tg3_bmcr_reset(tp);
2277                         if (err)
2278                                 return err;
2279                         do_phy_reset = 0;
2280                 }
2281
2282                 /* Disable transmitter and interrupt.  */
2283                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2284                         continue;
2285
2286                 reg32 |= 0x3000;
2287                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2288
2289                 /* Set full-duplex, 1000 Mbps.  */
2290                 tg3_writephy(tp, MII_BMCR,
2291                              BMCR_FULLDPLX | BMCR_SPEED1000);
2292
2293                 /* Set to master mode.  */
2294                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2295                         continue;
2296
2297                 tg3_writephy(tp, MII_CTRL1000,
2298                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2299
2300                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2301                 if (err)
2302                         return err;
2303
2304                 /* Block the PHY control access.  */
2305                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2306
2307                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2308                 if (!err)
2309                         break;
2310         } while (--retries);
2311
2312         err = tg3_phy_reset_chanpat(tp);
2313         if (err)
2314                 return err;
2315
2316         tg3_phydsp_write(tp, 0x8005, 0x0000);
2317
2318         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2319         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2320
2321         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2322
2323         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2324
2325         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2326                 reg32 &= ~0x3000;
2327                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2328         } else if (!err)
2329                 err = -EBUSY;
2330
2331         return err;
2332 }
2333
2334 /* This will reset the tigon3 PHY.  The reset is unconditional;
2335  * the FORCE argument this comment once described no longer exists.
2336  */
2337 static int tg3_phy_reset(struct tg3 *tp)
2338 {
2339         u32 val, cpmuctrl;
2340         int err;
2341
2342         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2343                 val = tr32(GRC_MISC_CFG);
2344                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2345                 udelay(40);
2346         }
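        /* Editor's note: BMSR latches link-status changes, so it is
         * deliberately read twice below -- the second read reflects
         * the current state.
         */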
2347         err  = tg3_readphy(tp, MII_BMSR, &val);
2348         err |= tg3_readphy(tp, MII_BMSR, &val);
2349         if (err != 0)
2350                 return -EBUSY;
2351
2352         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2353                 netif_carrier_off(tp->dev);
2354                 tg3_link_report(tp);
2355         }
2356
2357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2358             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2359             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2360                 err = tg3_phy_reset_5703_4_5(tp);
2361                 if (err)
2362                         return err;
2363                 goto out;
2364         }
2365
2366         cpmuctrl = 0;
2367         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2368             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2369                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2370                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2371                         tw32(TG3_CPMU_CTRL,
2372                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2373         }
2374
2375         err = tg3_bmcr_reset(tp);
2376         if (err)
2377                 return err;
2378
2379         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2380                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2381                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2382
2383                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2384         }
2385
2386         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2387             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2388                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2389                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2390                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2391                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2392                         udelay(40);
2393                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2394                 }
2395         }
2396
2397         if (tg3_flag(tp, 5717_PLUS) &&
2398             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2399                 return 0;
2400
2401         tg3_phy_apply_otp(tp);
2402
2403         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2404                 tg3_phy_toggle_apd(tp, true);
2405         else
2406                 tg3_phy_toggle_apd(tp, false);
2407
2408 out:
2409         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2410             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2411                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2412                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2413                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2414         }
2415
2416         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2417                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2418                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2419         }
2420
2421         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2422                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2423                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2424                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2425                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2426                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2427                 }
2428         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2429                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2430                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2431                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2432                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2433                                 tg3_writephy(tp, MII_TG3_TEST1,
2434                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2435                         } else
2436                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2437
2438                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2439                 }
2440         }
2441
2442         /* Set Extended packet length bit (bit 14) on all chips
2443          * that support jumbo frames. */
2444         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2445                 /* Cannot do read-modify-write on 5401 */
2446                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2447         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2448                 /* Set bit 14 with read-modify-write to preserve other bits */
2449                 err = tg3_phy_auxctl_read(tp,
2450                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2451                 if (!err)
2452                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2453                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2454         }
2455
2456         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2457          * jumbo frame transmission.
2458          */
2459         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2460                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2461                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2462                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2463         }
2464
2465         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2466                 /* adjust output voltage */
2467                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2468         }
2469
2470         tg3_phy_toggle_automdix(tp, 1);
2471         tg3_phy_set_wirespeed(tp);
2472         return 0;
2473 }
2474
2475 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2476 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2477 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2478                                           TG3_GPIO_MSG_NEED_VAUX)
2479 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2480         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2481          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2482          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2483          (TG3_GPIO_MSG_DRVR_PRES << 12))
2484
2485 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2486         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2487          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2488          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2489          (TG3_GPIO_MSG_NEED_VAUX << 12))
2490
2491 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2492 {
2493         u32 status, shift;
2494
2495         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2496             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2497                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2498         else
2499                 status = tr32(TG3_CPMU_DRV_STATUS);
2500
2501         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2502         status &= ~(TG3_GPIO_MSG_MASK << shift);
2503         status |= (newstat << shift);
2504
2505         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2506             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2507                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2508         else
2509                 tw32(TG3_CPMU_DRV_STATUS, status);
2510
2511         return status >> TG3_APE_GPIO_MSG_SHIFT;
2512 }
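
/*
 * Editor's note: each PCI function owns a 4-bit window in the shared
 * status word -- bits [shift+3:shift], with shift computed above as
 * TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn -- holding its DRVR_PRES and
 * NEED_VAUX flags.  The TG3_GPIO_MSG_ALL_*_MASK macros simply OR the
 * per-function copies at offsets 0, 4, 8 and 12.
 */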
2513
2514 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2515 {
2516         if (!tg3_flag(tp, IS_NIC))
2517                 return 0;
2518
2519         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2520             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2521             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2522                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2523                         return -EIO;
2524
2525                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2526
2527                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2528                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2529
2530                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2531         } else {
2532                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2533                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2534         }
2535
2536         return 0;
2537 }
2538
2539 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2540 {
2541         u32 grc_local_ctrl;
2542
2543         if (!tg3_flag(tp, IS_NIC) ||
2544             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2545             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2546                 return;
2547
2548         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2549
2550         tw32_wait_f(GRC_LOCAL_CTRL,
2551                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2552                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2553
2554         tw32_wait_f(GRC_LOCAL_CTRL,
2555                     grc_local_ctrl,
2556                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2557
2558         tw32_wait_f(GRC_LOCAL_CTRL,
2559                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2560                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2561 }
2562
2563 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2564 {
2565         if (!tg3_flag(tp, IS_NIC))
2566                 return;
2567
2568         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2569             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2570                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2571                             (GRC_LCLCTRL_GPIO_OE0 |
2572                              GRC_LCLCTRL_GPIO_OE1 |
2573                              GRC_LCLCTRL_GPIO_OE2 |
2574                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2575                              GRC_LCLCTRL_GPIO_OUTPUT1),
2576                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2577         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2578                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2579                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2580                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2581                                      GRC_LCLCTRL_GPIO_OE1 |
2582                                      GRC_LCLCTRL_GPIO_OE2 |
2583                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2584                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2585                                      tp->grc_local_ctrl;
2586                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2587                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2588
2589                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2590                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2591                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2592
2593                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2594                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2595                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2596         } else {
2597                 u32 no_gpio2;
2598                 u32 grc_local_ctrl = 0;
2599
2600                 /* Workaround to prevent overdrawing current. */
2601                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2602                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2603                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2604                                     grc_local_ctrl,
2605                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2606                 }
2607
2608                 /* On 5753 and variants, GPIO2 cannot be used. */
2609                 no_gpio2 = tp->nic_sram_data_cfg &
2610                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2611
2612                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2613                                   GRC_LCLCTRL_GPIO_OE1 |
2614                                   GRC_LCLCTRL_GPIO_OE2 |
2615                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2616                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2617                 if (no_gpio2) {
2618                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2619                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2620                 }
2621                 tw32_wait_f(GRC_LOCAL_CTRL,
2622                             tp->grc_local_ctrl | grc_local_ctrl,
2623                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2624
2625                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2626
2627                 tw32_wait_f(GRC_LOCAL_CTRL,
2628                             tp->grc_local_ctrl | grc_local_ctrl,
2629                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2630
2631                 if (!no_gpio2) {
2632                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2633                         tw32_wait_f(GRC_LOCAL_CTRL,
2634                                     tp->grc_local_ctrl | grc_local_ctrl,
2635                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2636                 }
2637         }
2638 }
2639
2640 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2641 {
2642         u32 msg = 0;
2643
2644         /* Serialize power state transitions */
2645         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2646                 return;
2647
2648         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2649                 msg = TG3_GPIO_MSG_NEED_VAUX;
2650
2651         msg = tg3_set_function_status(tp, msg);
2652
2653         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2654                 goto done;
2655
2656         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2657                 tg3_pwrsrc_switch_to_vaux(tp);
2658         else
2659                 tg3_pwrsrc_die_with_vmain(tp);
2660
2661 done:
2662         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2663 }
2664
2665 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2666 {
2667         bool need_vaux = false;
2668
2669         /* The GPIOs do something completely different on 57765. */
2670         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2671                 return;
2672
2673         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2674             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2675             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2676                 tg3_frob_aux_power_5717(tp, include_wol ?
2677                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2678                 return;
2679         }
2680
2681         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2682                 struct net_device *dev_peer;
2683
2684                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2685
2686                 /* remove_one() may have been run on the peer. */
2687                 if (dev_peer) {
2688                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2689
2690                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2691                                 return;
2692
2693                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2694                             tg3_flag(tp_peer, ENABLE_ASF))
2695                                 need_vaux = true;
2696                 }
2697         }
2698
2699         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2700             tg3_flag(tp, ENABLE_ASF))
2701                 need_vaux = true;
2702
2703         if (need_vaux)
2704                 tg3_pwrsrc_switch_to_vaux(tp);
2705         else
2706                 tg3_pwrsrc_die_with_vmain(tp);
2707 }
2708
2709 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2710 {
2711         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2712                 return 1;
2713         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2714                 if (speed != SPEED_10)
2715                         return 1;
2716         } else if (speed == SPEED_10)
2717                 return 1;
2718
2719         return 0;
2720 }
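
/*
 * Editor's note -- the polarity rule above, tabulated:
 *   LED_CTRL_MODE_PHY_2  -> 1 at any speed
 *   BCM5411 PHY          -> 1 unless speed == SPEED_10
 *   any other PHY        -> 1 only when speed == SPEED_10
 */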
2721
2722 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2723 {
2724         u32 val;
2725
2726         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2727                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2728                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2729                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2730
2731                         sg_dig_ctrl |=
2732                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2733                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2734                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2735                 }
2736                 return;
2737         }
2738
2739         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2740                 tg3_bmcr_reset(tp);
2741                 val = tr32(GRC_MISC_CFG);
2742                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2743                 udelay(40);
2744                 return;
2745         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2746                 u32 phytest;
2747                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2748                         u32 phy;
2749
2750                         tg3_writephy(tp, MII_ADVERTISE, 0);
2751                         tg3_writephy(tp, MII_BMCR,
2752                                      BMCR_ANENABLE | BMCR_ANRESTART);
2753
2754                         tg3_writephy(tp, MII_TG3_FET_TEST,
2755                                      phytest | MII_TG3_FET_SHADOW_EN);
2756                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2757                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2758                                 tg3_writephy(tp,
2759                                              MII_TG3_FET_SHDW_AUXMODE4,
2760                                              phy);
2761                         }
2762                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2763                 }
2764                 return;
2765         } else if (do_low_power) {
2766                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2767                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2768
2769                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2770                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2771                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2772                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2773         }
2774
2775         /* The PHY should not be powered down on some chips because
2776          * of bugs.
2777          */
2778         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2779             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2780             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2781              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2782                 return;
2783
2784         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2785             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2786                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2787                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2788                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2789                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2790         }
2791
2792         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2793 }
2794
2795 /* tp->lock is held. */
2796 static int tg3_nvram_lock(struct tg3 *tp)
2797 {
2798         if (tg3_flag(tp, NVRAM)) {
2799                 int i;
2800
2801                 if (tp->nvram_lock_cnt == 0) {
2802                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2803                         for (i = 0; i < 8000; i++) {
2804                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2805                                         break;
2806                                 udelay(20);
2807                         }
2808                         if (i == 8000) {
2809                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2810                                 return -ENODEV;
2811                         }
2812                 }
2813                 tp->nvram_lock_cnt++;
2814         }
2815         return 0;
2816 }
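
/*
 * Editor's note: the NVRAM lock is reference-counted.  Only the first
 * tg3_nvram_lock() call arbitrates for the hardware (polling for
 * SWARB_GNT1 up to 8000 x udelay(20) = 160 ms); nested calls just bump
 * nvram_lock_cnt, and tg3_nvram_unlock() below releases the hardware
 * arbiter only when the count drops back to zero.
 */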
2817
2818 /* tp->lock is held. */
2819 static void tg3_nvram_unlock(struct tg3 *tp)
2820 {
2821         if (tg3_flag(tp, NVRAM)) {
2822                 if (tp->nvram_lock_cnt > 0)
2823                         tp->nvram_lock_cnt--;
2824                 if (tp->nvram_lock_cnt == 0)
2825                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2826         }
2827 }
2828
2829 /* tp->lock is held. */
2830 static void tg3_enable_nvram_access(struct tg3 *tp)
2831 {
2832         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2833                 u32 nvaccess = tr32(NVRAM_ACCESS);
2834
2835                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2836         }
2837 }
2838
2839 /* tp->lock is held. */
2840 static void tg3_disable_nvram_access(struct tg3 *tp)
2841 {
2842         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2843                 u32 nvaccess = tr32(NVRAM_ACCESS);
2844
2845                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2846         }
2847 }
2848
2849 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2850                                         u32 offset, u32 *val)
2851 {
2852         u32 tmp;
2853         int i;
2854
2855         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2856                 return -EINVAL;
2857
2858         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2859                                         EEPROM_ADDR_DEVID_MASK |
2860                                         EEPROM_ADDR_READ);
2861         tw32(GRC_EEPROM_ADDR,
2862              tmp |
2863              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2864              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2865               EEPROM_ADDR_ADDR_MASK) |
2866              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2867
2868         for (i = 0; i < 1000; i++) {
2869                 tmp = tr32(GRC_EEPROM_ADDR);
2870
2871                 if (tmp & EEPROM_ADDR_COMPLETE)
2872                         break;
2873                 msleep(1);
2874         }
2875         if (!(tmp & EEPROM_ADDR_COMPLETE))
2876                 return -EBUSY;
2877
2878         tmp = tr32(GRC_EEPROM_DATA);
2879
2880         /*
2881          * The data will always be opposite the native endian
2882          * format.  Perform a blind byteswap to compensate.
2883          */
2884         *val = swab32(tmp);
2885
2886         return 0;
2887 }
2888
2889 #define NVRAM_CMD_TIMEOUT 10000
2890
2891 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2892 {
2893         int i;
2894
2895         tw32(NVRAM_CMD, nvram_cmd);
2896         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2897                 udelay(10);
2898                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2899                         udelay(10);
2900                         break;
2901                 }
2902         }
2903
2904         if (i == NVRAM_CMD_TIMEOUT)
2905                 return -EBUSY;
2906
2907         return 0;
2908 }
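
/* Sketch: the read command issued by tg3_nvram_read() below, shown in
 * isolation ('offset' and 'ret' are hypothetical locals).  The polling
 * loop above allows NVRAM_CMD_TIMEOUT * 10us (roughly 100ms) for the
 * controller to assert NVRAM_CMD_DONE.
 */
#if 0
	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
				 NVRAM_CMD_FIRST | NVRAM_CMD_LAST |
				 NVRAM_CMD_DONE);
#endif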
2909
2910 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2911 {
2912         if (tg3_flag(tp, NVRAM) &&
2913             tg3_flag(tp, NVRAM_BUFFERED) &&
2914             tg3_flag(tp, FLASH) &&
2915             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2916             (tp->nvram_jedecnum == JEDEC_ATMEL))
2917
2918                 addr = ((addr / tp->nvram_pagesize) <<
2919                         ATMEL_AT45DB0X1B_PAGE_POS) +
2920                        (addr % tp->nvram_pagesize);
2921
2922         return addr;
2923 }
2924
2925 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2926 {
2927         if (tg3_flag(tp, NVRAM) &&
2928             tg3_flag(tp, NVRAM_BUFFERED) &&
2929             tg3_flag(tp, FLASH) &&
2930             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2931             (tp->nvram_jedecnum == JEDEC_ATMEL))
2932
2933                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2934                         tp->nvram_pagesize) +
2935                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2936
2937         return addr;
2938 }
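
/* Worked example for the two translations above, assuming the Atmel
 * AT45DB0x1B geometry they target: tp->nvram_pagesize == 264 and
 * ATMEL_AT45DB0X1B_PAGE_POS == 9.  Logical address 1000 sits in page
 * 3 at byte 208, so the physical address is (3 << 9) + 208 == 1744;
 * tg3_nvram_logical_addr() inverts this: (1744 >> 9) * 264 +
 * (1744 & 511) == 1000.
 */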
2939
2940 /* NOTE: Data read from NVRAM is byteswapped using the same
2941  * byteswapping settings as all other register accesses.
2942  * tg3 devices are BE devices, so on a BE machine the data
2943  * returned will be exactly as it appears in NVRAM.  On an LE
2944  * machine, the 32-bit value will be byteswapped.
2945  */
2946 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2947 {
2948         int ret;
2949
2950         if (!tg3_flag(tp, NVRAM))
2951                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2952
2953         offset = tg3_nvram_phys_addr(tp, offset);
2954
2955         if (offset > NVRAM_ADDR_MSK)
2956                 return -EINVAL;
2957
2958         ret = tg3_nvram_lock(tp);
2959         if (ret)
2960                 return ret;
2961
2962         tg3_enable_nvram_access(tp);
2963
2964         tw32(NVRAM_ADDR, offset);
2965         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2966                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2967
2968         if (ret == 0)
2969                 *val = tr32(NVRAM_RDDATA);
2970
2971         tg3_disable_nvram_access(tp);
2972
2973         tg3_nvram_unlock(tp);
2974
2975         return ret;
2976 }
2977
2978 /* Ensures NVRAM data is in bytestream format. */
2979 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2980 {
2981         u32 v;
2982         int res = tg3_nvram_read(tp, offset, &v);
2983         if (!res)
2984                 *val = cpu_to_be32(v);
2985         return res;
2986 }
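
/* Worked example: if NVRAM holds the bytes aa bb cc dd, then
 * tg3_nvram_read() returns the host value 0xaabbccdd on either
 * endianness (so its in-memory bytes come out swapped on LE), and
 * tg3_nvram_read_be32() converts that to the fixed byte sequence
 * aa bb cc dd.  Below, a minimal consumer sketch (hypothetical locals,
 * modeled on the driver's ethtool read path) filling a byte buffer in
 * bytestream order, 4 bytes at a time.
 */
#if 0
	u32 i;

	for (i = 0; i < len; i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i,
					  (__be32 *)(buf + i));
		if (ret)
			break;
	}
#endif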
2987
2988 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2989                                     u32 offset, u32 len, u8 *buf)
2990 {
2991         int i, j, rc = 0;
2992         u32 val;
2993
2994         for (i = 0; i < len; i += 4) {
2995                 u32 addr;
2996                 __be32 data;
2997
2998                 addr = offset + i;
2999
3000                 memcpy(&data, buf + i, 4);
3001
3002                 /*
3003                  * The SEEPROM interface expects the data to always be opposite
3004                  * the native endian format.  We accomplish this by reversing
3005                  * all the operations that would have been performed on the
3006                  * data from a call to tg3_nvram_read_be32().
3007                  */
3008                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3009
3010                 val = tr32(GRC_EEPROM_ADDR);
3011                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3012
3013                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3014                         EEPROM_ADDR_READ);
3015                 tw32(GRC_EEPROM_ADDR, val |
3016                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3017                         (addr & EEPROM_ADDR_ADDR_MASK) |
3018                         EEPROM_ADDR_START |
3019                         EEPROM_ADDR_WRITE);
3020
3021                 for (j = 0; j < 1000; j++) {
3022                         val = tr32(GRC_EEPROM_ADDR);
3023
3024                         if (val & EEPROM_ADDR_COMPLETE)
3025                                 break;
3026                         msleep(1);
3027                 }
3028                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3029                         rc = -EBUSY;
3030                         break;
3031                 }
3032         }
3033
3034         return rc;
3035 }
3036
3037 /* offset and length are dword aligned */
3038 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3039                 u8 *buf)
3040 {
3041         int ret = 0;
3042         u32 pagesize = tp->nvram_pagesize;
3043         u32 pagemask = pagesize - 1;
3044         u32 nvram_cmd;
3045         u8 *tmp;
3046
3047         tmp = kmalloc(pagesize, GFP_KERNEL);
3048         if (tmp == NULL)
3049                 return -ENOMEM;
3050
3051         while (len) {
3052                 int j;
3053                 u32 phy_addr, page_off, size;
3054
3055                 phy_addr = offset & ~pagemask;
3056
3057                 for (j = 0; j < pagesize; j += 4) {
3058                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3059                                                   (__be32 *) (tmp + j));
3060                         if (ret)
3061                                 break;
3062                 }
3063                 if (ret)
3064                         break;
3065
3066                 page_off = offset & pagemask;
3067                 size = pagesize;
3068                 if (len < size)
3069                         size = len;
3070
3071                 len -= size;
3072
3073                 memcpy(tmp + page_off, buf, size);
3074
3075                 offset = offset + (pagesize - page_off);
3076
3077                 tg3_enable_nvram_access(tp);
3078
3079                 /*
3080                  * Before we can erase the flash page, we need
3081                  * to issue a special "write enable" command.
3082                  */
3083                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3084
3085                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3086                         break;
3087
3088                 /* Erase the target page */
3089                 tw32(NVRAM_ADDR, phy_addr);
3090
3091                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3092                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3093
3094                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3095                         break;
3096
3097                 /* Issue another write enable to start the write. */
3098                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3099
3100                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3101                         break;
3102
3103                 for (j = 0; j < pagesize; j += 4) {
3104                         __be32 data;
3105
3106                         data = *((__be32 *) (tmp + j));
3107
3108                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3109
3110                         tw32(NVRAM_ADDR, phy_addr + j);
3111
3112                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3113                                 NVRAM_CMD_WR;
3114
3115                         if (j == 0)
3116                                 nvram_cmd |= NVRAM_CMD_FIRST;
3117                         else if (j == (pagesize - 4))
3118                                 nvram_cmd |= NVRAM_CMD_LAST;
3119
3120                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3121                         if (ret)
3122                                 break;
3123                 }
3124                 if (ret)
3125                         break;
3126         }
3127
3128         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3129         tg3_nvram_exec_cmd(tp, nvram_cmd);
3130
3131         kfree(tmp);
3132
3133         return ret;
3134 }
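
/* Worked example for the loop above, assuming a 256-byte flash page:
 * with offset == 0x108 and len == 8, phy_addr == 0x100 (page base),
 * page_off == 0x08 and size == 8.  The whole page is read back into
 * tmp, the 8 new bytes are merged at tmp + 8, the page is erased and
 * then reprogrammed one dword at a time (NVRAM_CMD_FIRST on the first
 * dword, NVRAM_CMD_LAST on the final one), and offset advances to the
 * next page boundary, 0x200.
 */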
3135
3136 /* offset and length are dword aligned */
3137 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3138                 u8 *buf)
3139 {
3140         int i, ret = 0;
3141
3142         for (i = 0; i < len; i += 4, offset += 4) {
3143                 u32 page_off, phy_addr, nvram_cmd;
3144                 __be32 data;
3145
3146                 memcpy(&data, buf + i, 4);
3147                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3148
3149                 page_off = offset % tp->nvram_pagesize;
3150
3151                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3152
3153                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3154
3155                 if (page_off == 0 || i == 0)
3156                         nvram_cmd |= NVRAM_CMD_FIRST;
3157                 if (page_off == (tp->nvram_pagesize - 4))
3158                         nvram_cmd |= NVRAM_CMD_LAST;
3159
3160                 if (i == (len - 4))
3161                         nvram_cmd |= NVRAM_CMD_LAST;
3162
3163                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3164                     !tg3_flag(tp, FLASH) ||
3165                     !tg3_flag(tp, 57765_PLUS))
3166                         tw32(NVRAM_ADDR, phy_addr);
3167
3168                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3169                     !tg3_flag(tp, 5755_PLUS) &&
3170                     (tp->nvram_jedecnum == JEDEC_ST) &&
3171                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3172                         u32 cmd;
3173
3174                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3175                         ret = tg3_nvram_exec_cmd(tp, cmd);
3176                         if (ret)
3177                                 break;
3178                 }
3179                 if (!tg3_flag(tp, FLASH)) {
3180                         /* We always do complete word writes to eeprom. */
3181                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3182                 }
3183
3184                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3185                 if (ret)
3186                         break;
3187         }
3188         return ret;
3189 }
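
/* Worked example for the FIRST/LAST flag logic above, assuming the
 * buffered Atmel part with tp->nvram_pagesize == 264: at offset 264,
 * page_off == 0, so NVRAM_CMD_FIRST is set; at offset 524, page_off ==
 * 260 == pagesize - 4, so NVRAM_CMD_LAST closes the page.  i == 0 and
 * i == len - 4 additionally force FIRST/LAST at the ends of the
 * request regardless of page position.
 */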
3190
3191 /* offset and length are dword aligned */
3192 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3193 {
3194         int ret;
3195
3196         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3197                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3198                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3199                 udelay(40);
3200         }
3201
3202         if (!tg3_flag(tp, NVRAM)) {
3203                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3204         } else {
3205                 u32 grc_mode;
3206
3207                 ret = tg3_nvram_lock(tp);
3208                 if (ret)
3209                         return ret;
3210
3211                 tg3_enable_nvram_access(tp);
3212                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3213                         tw32(NVRAM_WRITE1, 0x406);
3214
3215                 grc_mode = tr32(GRC_MODE);
3216                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3217
3218                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3219                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3220                                 buf);
3221                 } else {
3222                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3223                                 buf);
3224                 }
3225
3226                 grc_mode = tr32(GRC_MODE);
3227                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3228
3229                 tg3_disable_nvram_access(tp);
3230                 tg3_nvram_unlock(tp);
3231         }
3232
3233         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3234                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3235                 udelay(40);
3236         }
3237
3238         return ret;
3239 }
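
/* Sketch of a caller (hypothetical locals, modeled on the ethtool
 * set_eeprom path): offset and len must be dword aligned, and the
 * buffer holds bytestream-ordered data, matching what
 * tg3_nvram_read_be32() produces.
 */
#if 0
	ret = tg3_nvram_write_block(tp, offset, len, data);
	if (ret)
		return ret;
#endif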
3240
3241 #define RX_CPU_SCRATCH_BASE     0x30000
3242 #define RX_CPU_SCRATCH_SIZE     0x04000
3243 #define TX_CPU_SCRATCH_BASE     0x34000
3244 #define TX_CPU_SCRATCH_SIZE     0x04000
3245
3246 /* tp->lock is held. */
3247 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3248 {
3249         int i;
3250
3251         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3252
3253         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3254                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3255
3256                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3257                 return 0;
3258         }
3259         if (offset == RX_CPU_BASE) {
3260                 for (i = 0; i < 10000; i++) {
3261                         tw32(offset + CPU_STATE, 0xffffffff);
3262                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3263                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3264                                 break;
3265                 }
3266
3267                 tw32(offset + CPU_STATE, 0xffffffff);
3268                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3269                 udelay(10);
3270         } else {
3271                 for (i = 0; i < 10000; i++) {
3272                         tw32(offset + CPU_STATE, 0xffffffff);
3273                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3274                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3275                                 break;
3276                 }
3277         }
3278
3279         if (i >= 10000) {
3280                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3281                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3282                 return -ENODEV;
3283         }
3284
3285         /* Clear firmware's nvram arbitration. */
3286         if (tg3_flag(tp, NVRAM))
3287                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3288         return 0;
3289 }
3290
3291 struct fw_info {
3292         unsigned int fw_base;
3293         unsigned int fw_len;
3294         const __be32 *fw_data;
3295 };
3296
3297 /* tp->lock is held. */
3298 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3299                                  u32 cpu_scratch_base, int cpu_scratch_size,
3300                                  struct fw_info *info)
3301 {
3302         int err, lock_err, i;
3303         void (*write_op)(struct tg3 *, u32, u32);
3304
3305         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3306                 netdev_err(tp->dev,
3307                            "%s: Trying to load TX cpu firmware on a 5705-class device\n",
3308                            __func__);
3309                 return -EINVAL;
3310         }
3311
3312         if (tg3_flag(tp, 5705_PLUS))
3313                 write_op = tg3_write_mem;
3314         else
3315                 write_op = tg3_write_indirect_reg32;
3316
3317         /* The bootcode may still be loading at this point.
3318          * Grab the nvram lock before halting the cpu.
3319          */
3320         lock_err = tg3_nvram_lock(tp);
3321         err = tg3_halt_cpu(tp, cpu_base);
3322         if (!lock_err)
3323                 tg3_nvram_unlock(tp);
3324         if (err)
3325                 goto out;
3326
3327         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3328                 write_op(tp, cpu_scratch_base + i, 0);
3329         tw32(cpu_base + CPU_STATE, 0xffffffff);
3330         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3331         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3332                 write_op(tp, (cpu_scratch_base +
3333                               (info->fw_base & 0xffff) +
3334                               (i * sizeof(u32))),
3335                               be32_to_cpu(info->fw_data[i]));
3336
3337         err = 0;
3338
3339 out:
3340         return err;
3341 }
3342
3343 /* tp->lock is held. */
3344 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3345 {
3346         struct fw_info info;
3347         const __be32 *fw_data;
3348         int err, i;
3349
3350         fw_data = (void *)tp->fw->data;
3351
3352         /* The firmware blob starts with version numbers, followed by
3353            the start address and length.  We use the complete length:
3354            length = end_address_of_bss - start_address_of_text.
3355            The remainder is the image, to be loaded contiguously
3356            from the start address. */
3357
3358         info.fw_base = be32_to_cpu(fw_data[1]);
3359         info.fw_len = tp->fw->size - 12;
3360         info.fw_data = &fw_data[3];
3361
3362         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3363                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3364                                     &info);
3365         if (err)
3366                 return err;
3367
3368         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3369                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3370                                     &info);
3371         if (err)
3372                 return err;
3373
3374         /* Now start up only the RX cpu. */
3375         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3376         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3377
3378         for (i = 0; i < 5; i++) {
3379                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3380                         break;
3381                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3382                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3383                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3384                 udelay(1000);
3385         }
3386         if (i >= 5) {
3387                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3388                            "should be %08x\n", __func__,
3389                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3390                 return -ENODEV;
3391         }
3392         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3393         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3394
3395         return 0;
3396 }
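
/* The layout implied by the parsing above, as a sketch (hypothetical
 * struct, not from tg3.h): three big-endian header words, then the
 * image.  Note the driver derives fw_len from tp->fw->size - 12
 * rather than trusting the length word in the header.
 */
#if 0
	struct tg3_fw_hdr {
		__be32 version;		/* fw_data[0] */
		__be32 base_addr;	/* fw_data[1] -> info.fw_base */
		__be32 len;		/* fw_data[2], unused here */
		__be32 image[];		/* fw_data[3] -> info.fw_data */
	};
#endif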
3397
3398 /* tp->lock is held. */
3399 static int tg3_load_tso_firmware(struct tg3 *tp)
3400 {
3401         struct fw_info info;
3402         const __be32 *fw_data;
3403         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3404         int err, i;
3405
3406         if (tg3_flag(tp, HW_TSO_1) ||
3407             tg3_flag(tp, HW_TSO_2) ||
3408             tg3_flag(tp, HW_TSO_3))
3409                 return 0;
3410
3411         fw_data = (void *)tp->fw->data;
3412
3413         /* The firmware blob starts with version numbers, followed by
3414            the start address and length.  We use the complete length:
3415            length = end_address_of_bss - start_address_of_text.
3416            The remainder is the image, to be loaded contiguously
3417            from the start address. */
3418
3419         info.fw_base = be32_to_cpu(fw_data[1]);
3420         cpu_scratch_size = tp->fw_len;
3421         info.fw_len = tp->fw->size - 12;
3422         info.fw_data = &fw_data[3];
3423
3424         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3425                 cpu_base = RX_CPU_BASE;
3426                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3427         } else {
3428                 cpu_base = TX_CPU_BASE;
3429                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3430                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3431         }
3432
3433         err = tg3_load_firmware_cpu(tp, cpu_base,
3434                                     cpu_scratch_base, cpu_scratch_size,
3435                                     &info);
3436         if (err)
3437                 return err;
3438
3439         /* Now start up the cpu. */
3440         tw32(cpu_base + CPU_STATE, 0xffffffff);
3441         tw32_f(cpu_base + CPU_PC, info.fw_base);
3442
3443         for (i = 0; i < 5; i++) {
3444                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3445                         break;
3446                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3447                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3448                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3449                 udelay(1000);
3450         }
3451         if (i >= 5) {
3452                 netdev_err(tp->dev,
3453                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3454                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3455                 return -ENODEV;
3456         }
3457         tw32(cpu_base + CPU_STATE, 0xffffffff);
3458         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3459         return 0;
3460 }
3461
3462
3463 /* tp->lock is held. */
3464 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3465 {
3466         u32 addr_high, addr_low;
3467         int i;
3468
3469         addr_high = ((tp->dev->dev_addr[0] << 8) |
3470                      tp->dev->dev_addr[1]);
3471         addr_low = ((tp->dev->dev_addr[2] << 24) |
3472                     (tp->dev->dev_addr[3] << 16) |
3473                     (tp->dev->dev_addr[4] <<  8) |
3474                     (tp->dev->dev_addr[5] <<  0));
3475         for (i = 0; i < 4; i++) {
3476                 if (i == 1 && skip_mac_1)
3477                         continue;
3478                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3479                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3480         }
3481
3482         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3483             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3484                 for (i = 0; i < 12; i++) {
3485                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3486                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3487                 }
3488         }
3489
3490         addr_high = (tp->dev->dev_addr[0] +
3491                      tp->dev->dev_addr[1] +
3492                      tp->dev->dev_addr[2] +
3493                      tp->dev->dev_addr[3] +
3494                      tp->dev->dev_addr[4] +
3495                      tp->dev->dev_addr[5]) &
3496                 TX_BACKOFF_SEED_MASK;
3497         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3498 }
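
/* Worked example for the packing above: for dev_addr
 * 00:11:22:33:44:55, addr_high == 0x0011 and addr_low == 0x22334455,
 * mirrored into all four MAC_ADDR_* slots (slot 1 skipped when
 * skip_mac_1 is set) and, on 5703/5704, the twelve MAC_EXTADDR_*
 * slots.  The backoff seed is the byte sum, 0xff here, masked with
 * TX_BACKOFF_SEED_MASK.
 */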
3499
3500 static void tg3_enable_register_access(struct tg3 *tp)
3501 {
3502         /*
3503          * Make sure register accesses (indirect or otherwise) will function
3504          * correctly.
3505          */
3506         pci_write_config_dword(tp->pdev,
3507                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3508 }
3509
3510 static int tg3_power_up(struct tg3 *tp)
3511 {
3512         int err;
3513
3514         tg3_enable_register_access(tp);
3515
3516         err = pci_set_power_state(tp->pdev, PCI_D0);
3517         if (!err) {
3518                 /* Switch out of Vaux if it is a NIC */
3519                 tg3_pwrsrc_switch_to_vmain(tp);
3520         } else {
3521                 netdev_err(tp->dev, "Transition to D0 failed\n");
3522         }
3523
3524         return err;
3525 }
3526
3527 static int tg3_setup_phy(struct tg3 *, int);
3528
3529 static int tg3_power_down_prepare(struct tg3 *tp)
3530 {
3531         u32 misc_host_ctrl;
3532         bool device_should_wake, do_low_power;
3533
3534         tg3_enable_register_access(tp);
3535
3536         /* Restore the CLKREQ setting. */
3537         if (tg3_flag(tp, CLKREQ_BUG)) {
3538                 u16 lnkctl;
3539
3540                 pci_read_config_word(tp->pdev,
3541                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3542                                      &lnkctl);
3543                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3544                 pci_write_config_word(tp->pdev,
3545                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3546                                       lnkctl);
3547         }
3548
3549         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3550         tw32(TG3PCI_MISC_HOST_CTRL,
3551              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3552
3553         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3554                              tg3_flag(tp, WOL_ENABLE);
3555
3556         if (tg3_flag(tp, USE_PHYLIB)) {
3557                 do_low_power = false;
3558                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3559                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3560                         struct phy_device *phydev;
3561                         u32 phyid, advertising;
3562
3563                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3564
3565                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3566
3567                         tp->link_config.speed = phydev->speed;
3568                         tp->link_config.duplex = phydev->duplex;
3569                         tp->link_config.autoneg = phydev->autoneg;
3570                         tp->link_config.advertising = phydev->advertising;
3571
3572                         advertising = ADVERTISED_TP |
3573                                       ADVERTISED_Pause |
3574                                       ADVERTISED_Autoneg |
3575                                       ADVERTISED_10baseT_Half;
3576
3577                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3578                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3579                                         advertising |=
3580                                                 ADVERTISED_100baseT_Half |
3581                                                 ADVERTISED_100baseT_Full |
3582                                                 ADVERTISED_10baseT_Full;
3583                                 else
3584                                         advertising |= ADVERTISED_10baseT_Full;
3585                         }
3586
3587                         phydev->advertising = advertising;
3588
3589                         phy_start_aneg(phydev);
3590
3591                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3592                         if (phyid != PHY_ID_BCMAC131) {
3593                                 phyid &= PHY_BCM_OUI_MASK;
3594                                 if (phyid == PHY_BCM_OUI_1 ||
3595                                     phyid == PHY_BCM_OUI_2 ||
3596                                     phyid == PHY_BCM_OUI_3)
3597                                         do_low_power = true;
3598                         }
3599                 }
3600         } else {
3601                 do_low_power = true;
3602
3603                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3604                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3605
3606                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3607                         tg3_setup_phy(tp, 0);
3608         }
3609
3610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3611                 u32 val;
3612
3613                 val = tr32(GRC_VCPU_EXT_CTRL);
3614                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3615         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3616                 int i;
3617                 u32 val;
3618
3619                 for (i = 0; i < 200; i++) {
3620                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3621                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3622                                 break;
3623                         msleep(1);
3624                 }
3625         }
3626         if (tg3_flag(tp, WOL_CAP))
3627                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3628                                                      WOL_DRV_STATE_SHUTDOWN |
3629                                                      WOL_DRV_WOL |
3630                                                      WOL_SET_MAGIC_PKT);
3631
3632         if (device_should_wake) {
3633                 u32 mac_mode;
3634
3635                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3636                         if (do_low_power &&
3637                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3638                                 tg3_phy_auxctl_write(tp,
3639                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3640                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3641                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3642                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3643                                 udelay(40);
3644                         }
3645
3646                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3647                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3648                         else
3649                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3650
3651                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3652                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3653                             ASIC_REV_5700) {
3654                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3655                                              SPEED_100 : SPEED_10;
3656                                 if (tg3_5700_link_polarity(tp, speed))
3657                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3658                                 else
3659                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3660                         }
3661                 } else {
3662                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3663                 }
3664
3665                 if (!tg3_flag(tp, 5750_PLUS))
3666                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3667
3668                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3669                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3670                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3671                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3672
3673                 if (tg3_flag(tp, ENABLE_APE))
3674                         mac_mode |= MAC_MODE_APE_TX_EN |
3675                                     MAC_MODE_APE_RX_EN |
3676                                     MAC_MODE_TDE_ENABLE;
3677
3678                 tw32_f(MAC_MODE, mac_mode);
3679                 udelay(100);
3680
3681                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3682                 udelay(10);
3683         }
3684
3685         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3686             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3687              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3688                 u32 base_val;
3689
3690                 base_val = tp->pci_clock_ctrl;
3691                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3692                              CLOCK_CTRL_TXCLK_DISABLE);
3693
3694                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3695                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3696         } else if (tg3_flag(tp, 5780_CLASS) ||
3697                    tg3_flag(tp, CPMU_PRESENT) ||
3698                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3699                 /* do nothing */
3700         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3701                 u32 newbits1, newbits2;
3702
3703                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3704                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3705                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3706                                     CLOCK_CTRL_TXCLK_DISABLE |
3707                                     CLOCK_CTRL_ALTCLK);
3708                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3709                 } else if (tg3_flag(tp, 5705_PLUS)) {
3710                         newbits1 = CLOCK_CTRL_625_CORE;
3711                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3712                 } else {
3713                         newbits1 = CLOCK_CTRL_ALTCLK;
3714                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3715                 }
3716
3717                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3718                             40);
3719
3720                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3721                             40);
3722
3723                 if (!tg3_flag(tp, 5705_PLUS)) {
3724                         u32 newbits3;
3725
3726                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3727                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3728                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3729                                             CLOCK_CTRL_TXCLK_DISABLE |
3730                                             CLOCK_CTRL_44MHZ_CORE);
3731                         } else {
3732                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3733                         }
3734
3735                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3736                                     tp->pci_clock_ctrl | newbits3, 40);
3737                 }
3738         }
3739
3740         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3741                 tg3_power_down_phy(tp, do_low_power);
3742
3743         tg3_frob_aux_power(tp, true);
3744
3745         /* Workaround for unstable PLL clock */
3746         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3747             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3748                 u32 val = tr32(0x7d00);
3749
3750                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3751                 tw32(0x7d00, val);
3752                 if (!tg3_flag(tp, ENABLE_ASF)) {
3753                         int err;
3754
3755                         err = tg3_nvram_lock(tp);
3756                         tg3_halt_cpu(tp, RX_CPU_BASE);
3757                         if (!err)
3758                                 tg3_nvram_unlock(tp);
3759                 }
3760         }
3761
3762         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3763
3764         return 0;
3765 }
3766
3767 static void tg3_power_down(struct tg3 *tp)
3768 {
3769         tg3_power_down_prepare(tp);
3770
3771         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3772         pci_set_power_state(tp->pdev, PCI_D3hot);
3773 }
3774
3775 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3776 {
3777         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3778         case MII_TG3_AUX_STAT_10HALF:
3779                 *speed = SPEED_10;
3780                 *duplex = DUPLEX_HALF;
3781                 break;
3782
3783         case MII_TG3_AUX_STAT_10FULL:
3784                 *speed = SPEED_10;
3785                 *duplex = DUPLEX_FULL;
3786                 break;
3787
3788         case MII_TG3_AUX_STAT_100HALF:
3789                 *speed = SPEED_100;
3790                 *duplex = DUPLEX_HALF;
3791                 break;
3792
3793         case MII_TG3_AUX_STAT_100FULL:
3794                 *speed = SPEED_100;
3795                 *duplex = DUPLEX_FULL;
3796                 break;
3797
3798         case MII_TG3_AUX_STAT_1000HALF:
3799                 *speed = SPEED_1000;
3800                 *duplex = DUPLEX_HALF;
3801                 break;
3802
3803         case MII_TG3_AUX_STAT_1000FULL:
3804                 *speed = SPEED_1000;
3805                 *duplex = DUPLEX_FULL;
3806                 break;
3807
3808         default:
3809                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3810                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3811                                  SPEED_10;
3812                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3813                                   DUPLEX_HALF;
3814                         break;
3815                 }
3816                 *speed = SPEED_UNKNOWN;
3817                 *duplex = DUPLEX_UNKNOWN;
3818                 break;
3819         }
3820 }
3821
3822 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3823 {
3824         int err = 0;
3825         u32 val, new_adv;
3826
3827         new_adv = ADVERTISE_CSMA;
3828         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3829         new_adv |= mii_advertise_flowctrl(flowctrl);
3830
3831         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3832         if (err)
3833                 goto done;
3834
3835         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3836                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3837
3838                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3839                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3840                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3841
3842                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3843                 if (err)
3844                         goto done;
3845         }
3846
3847         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3848                 goto done;
3849
3850         tw32(TG3_CPMU_EEE_MODE,
3851              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3852
3853         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3854         if (!err) {
3855                 u32 err2;
3856
3857                 val = 0;
3858                 /* Advertise 100-BaseTX EEE ability */
3859                 if (advertise & ADVERTISED_100baseT_Full)
3860                         val |= MDIO_AN_EEE_ADV_100TX;
3861                 /* Advertise 1000-BaseT EEE ability */
3862                 if (advertise & ADVERTISED_1000baseT_Full)
3863                         val |= MDIO_AN_EEE_ADV_1000T;
3864                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3865                 if (err)
3866                         val = 0;
3867
3868                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3869                 case ASIC_REV_5717:
3870                 case ASIC_REV_57765:
3871                 case ASIC_REV_57766:
3872                 case ASIC_REV_5719:
3873                         /* If we advertised any EEE capabilities above... */
3874                         if (val)
3875                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3876                                       MII_TG3_DSP_TAP26_RMRXSTO |
3877                                       MII_TG3_DSP_TAP26_OPCSINPT;
3878                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3879                         /* Fall through */
3880                 case ASIC_REV_5720:
3881                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3882                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3883                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3884                 }
3885
3886                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3887                 if (!err)
3888                         err = err2;
3889         }
3890
3891 done:
3892         return err;
3893 }
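
/* Worked example for the advertisement composition above (standard
 * mii.h helpers): advertise == ADVERTISED_100baseT_Full |
 * ADVERTISED_10baseT_Full with flowctrl == FLOW_CTRL_TX | FLOW_CTRL_RX
 * yields MII_ADVERTISE == ADVERTISE_CSMA | ADVERTISE_100FULL |
 * ADVERTISE_10FULL | ADVERTISE_PAUSE_CAP; any 1000baseT bits are
 * written separately to MII_CTRL1000 via
 * ethtool_adv_to_mii_ctrl1000_t().
 */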
3894
3895 static void tg3_phy_copper_begin(struct tg3 *tp)
3896 {
3897         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3898             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3899                 u32 adv, fc;
3900
3901                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3902                         adv = ADVERTISED_10baseT_Half |
3903                               ADVERTISED_10baseT_Full;
3904                         if (tg3_flag(tp, WOL_SPEED_100MB))
3905                                 adv |= ADVERTISED_100baseT_Half |
3906                                        ADVERTISED_100baseT_Full;
3907
3908                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3909                 } else {
3910                         adv = tp->link_config.advertising;
3911                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3912                                 adv &= ~(ADVERTISED_1000baseT_Half |
3913                                          ADVERTISED_1000baseT_Full);
3914
3915                         fc = tp->link_config.flowctrl;
3916                 }
3917
3918                 tg3_phy_autoneg_cfg(tp, adv, fc);
3919
3920                 tg3_writephy(tp, MII_BMCR,
3921                              BMCR_ANENABLE | BMCR_ANRESTART);
3922         } else {
3923                 int i;
3924                 u32 bmcr, orig_bmcr;
3925
3926                 tp->link_config.active_speed = tp->link_config.speed;
3927                 tp->link_config.active_duplex = tp->link_config.duplex;
3928
3929                 bmcr = 0;
3930                 switch (tp->link_config.speed) {
3931                 default:
3932                 case SPEED_10:
3933                         break;
3934
3935                 case SPEED_100:
3936                         bmcr |= BMCR_SPEED100;
3937                         break;
3938
3939                 case SPEED_1000:
3940                         bmcr |= BMCR_SPEED1000;
3941                         break;
3942                 }
3943
3944                 if (tp->link_config.duplex == DUPLEX_FULL)
3945                         bmcr |= BMCR_FULLDPLX;
3946
3947                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3948                     (bmcr != orig_bmcr)) {
3949                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3950                         for (i = 0; i < 1500; i++) {
3951                                 u32 tmp;
3952
3953                                 udelay(10);
3954                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3955                                     tg3_readphy(tp, MII_BMSR, &tmp))
3956                                         continue;
3957                                 if (!(tmp & BMSR_LSTATUS)) {
3958                                         udelay(40);
3959                                         break;
3960                                 }
3961                         }
3962                         tg3_writephy(tp, MII_BMCR, bmcr);
3963                         udelay(40);
3964                 }
3965         }
3966 }
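
/* Worked example for the forced-speed path above: with
 * tp->link_config.speed == SPEED_1000 and duplex == DUPLEX_FULL, bmcr
 * becomes BMCR_SPEED1000 | BMCR_FULLDPLX.  The transient BMCR_LOOPBACK
 * write drops the link first (the loop polls for BMSR_LSTATUS to
 * clear) before the new mode is written.
 */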
3967
3968 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3969 {
3970         int err;
3971
3972         /* Turn off tap power management and set the
3973          * extended packet length bit. */
3974         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3975
3976         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3977         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3978         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3979         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3980         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3981
3982         udelay(40);
3983
3984         return err;
3985 }
3986
3987 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3988 {
3989         u32 advmsk, tgtadv, advertising;
3990
3991         advertising = tp->link_config.advertising;
3992         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
3993
3994         advmsk = ADVERTISE_ALL;
3995         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3996                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
3997                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3998         }
3999
4000         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4001                 return false;
4002
4003         if ((*lcladv & advmsk) != tgtadv)
4004                 return false;
4005
4006         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4007                 u32 tg3_ctrl;
4008
4009                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4010
4011                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4012                         return false;
4013
4014                 if (tgtadv &&
4015                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4016                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4017                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4018                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4019                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4020                 } else {
4021                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4022                 }
4023
4024                 if (tg3_ctrl != tgtadv)
4025                         return false;
4026         }
4027
4028         return true;
4029 }
4030
4031 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4032 {
4033         u32 lpeth = 0;
4034
4035         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4036                 u32 val;
4037
4038                 if (tg3_readphy(tp, MII_STAT1000, &val))
4039                         return false;
4040
4041                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4042         }
4043
4044         if (tg3_readphy(tp, MII_LPA, rmtadv))
4045                 return false;
4046
4047         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4048         tp->link_config.rmt_adv = lpeth;
4049
4050         return true;
4051 }
4052
4053 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4054 {
4055         int current_link_up;
4056         u32 bmsr, val;
4057         u32 lcl_adv, rmt_adv;
4058         u16 current_speed;
4059         u8 current_duplex;
4060         int i, err;
4061
4062         tw32(MAC_EVENT, 0);
4063
4064         tw32_f(MAC_STATUS,
4065              (MAC_STATUS_SYNC_CHANGED |
4066               MAC_STATUS_CFG_CHANGED |
4067               MAC_STATUS_MI_COMPLETION |
4068               MAC_STATUS_LNKSTATE_CHANGED));
4069         udelay(40);
4070
4071         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4072                 tw32_f(MAC_MI_MODE,
4073                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4074                 udelay(80);
4075         }
4076
4077         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4078
4079         /* Some third-party PHYs need to be reset on link going
4080          * down.
4081          */
4082         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4083              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4084              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4085             netif_carrier_ok(tp->dev)) {
4086                 tg3_readphy(tp, MII_BMSR, &bmsr);
4087                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4088                     !(bmsr & BMSR_LSTATUS))
4089                         force_reset = 1;
4090         }
4091         if (force_reset)
4092                 tg3_phy_reset(tp);
4093
4094         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4095                 tg3_readphy(tp, MII_BMSR, &bmsr);
4096                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4097                     !tg3_flag(tp, INIT_COMPLETE))
4098                         bmsr = 0;
4099
4100                 if (!(bmsr & BMSR_LSTATUS)) {
4101                         err = tg3_init_5401phy_dsp(tp);
4102                         if (err)
4103                                 return err;
4104
4105                         tg3_readphy(tp, MII_BMSR, &bmsr);
4106                         for (i = 0; i < 1000; i++) {
4107                                 udelay(10);
4108                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4109                                     (bmsr & BMSR_LSTATUS)) {
4110                                         udelay(40);
4111                                         break;
4112                                 }
4113                         }
4114
4115                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4116                             TG3_PHY_REV_BCM5401_B0 &&
4117                             !(bmsr & BMSR_LSTATUS) &&
4118                             tp->link_config.active_speed == SPEED_1000) {
4119                                 err = tg3_phy_reset(tp);
4120                                 if (!err)
4121                                         err = tg3_init_5401phy_dsp(tp);
4122                                 if (err)
4123                                         return err;
4124                         }
4125                 }
4126         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4127                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4128                 /* 5701 {A0,B0} CRC bug workaround */
4129                 tg3_writephy(tp, 0x15, 0x0a75);
4130                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4131                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4132                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4133         }
4134
4135         /* Clear pending interrupts... */
4136         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4137         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4138
4139         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4140                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4141         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4142                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4143
4144         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4145             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4146                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4147                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4148                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4149                 else
4150                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4151         }
4152
4153         current_link_up = 0;
4154         current_speed = SPEED_UNKNOWN;
4155         current_duplex = DUPLEX_UNKNOWN;
4156         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4157         tp->link_config.rmt_adv = 0;
4158
4159         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4160                 err = tg3_phy_auxctl_read(tp,
4161                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4162                                           &val);
4163                 if (!err && !(val & (1 << 10))) {
4164                         tg3_phy_auxctl_write(tp,
4165                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4166                                              val | (1 << 10));
4167                         goto relink;
4168                 }
4169         }
4170
4171         bmsr = 0;
4172         for (i = 0; i < 100; i++) {
4173                 tg3_readphy(tp, MII_BMSR, &bmsr);
4174                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4175                     (bmsr & BMSR_LSTATUS))
4176                         break;
4177                 udelay(40);
4178         }
4179
4180         if (bmsr & BMSR_LSTATUS) {
4181                 u32 aux_stat, bmcr;
4182
4183                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4184                 for (i = 0; i < 2000; i++) {
4185                         udelay(10);
4186                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4187                             aux_stat)
4188                                 break;
4189                 }
4190
4191                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4192                                              &current_speed,
4193                                              &current_duplex);
4194
4195                 bmcr = 0;
4196                 for (i = 0; i < 200; i++) {
4197                         tg3_readphy(tp, MII_BMCR, &bmcr);
4198                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4199                                 continue;
4200                         if (bmcr && bmcr != 0x7fff)
4201                                 break;
4202                         udelay(10);
4203                 }
4204
4205                 lcl_adv = 0;
4206                 rmt_adv = 0;
4207
4208                 tp->link_config.active_speed = current_speed;
4209                 tp->link_config.active_duplex = current_duplex;
4210
4211                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4212                         if ((bmcr & BMCR_ANENABLE) &&
4213                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4214                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4215                                 current_link_up = 1;
4216                 } else {
4217                         if (!(bmcr & BMCR_ANENABLE) &&
4218                             tp->link_config.speed == current_speed &&
4219                             tp->link_config.duplex == current_duplex &&
4220                             tp->link_config.flowctrl ==
4221                             tp->link_config.active_flowctrl) {
4222                                 current_link_up = 1;
4223                         }
4224                 }
4225
4226                 if (current_link_up == 1 &&
4227                     tp->link_config.active_duplex == DUPLEX_FULL) {
4228                         u32 reg, bit;
4229
4230                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4231                                 reg = MII_TG3_FET_GEN_STAT;
4232                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4233                         } else {
4234                                 reg = MII_TG3_EXT_STAT;
4235                                 bit = MII_TG3_EXT_STAT_MDIX;
4236                         }
4237
4238                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4239                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4240
4241                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4242                 }
4243         }
4244
4245 relink:
4246         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4247                 tg3_phy_copper_begin(tp);
4248
4249                 tg3_readphy(tp, MII_BMSR, &bmsr);
4250                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4251                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4252                         current_link_up = 1;
4253         }
4254
4255         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4256         if (current_link_up == 1) {
4257                 if (tp->link_config.active_speed == SPEED_100 ||
4258                     tp->link_config.active_speed == SPEED_10)
4259                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4260                 else
4261                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4262         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4263                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4264         else
4265                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4266
4267         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4268         if (tp->link_config.active_duplex == DUPLEX_HALF)
4269                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4270
4271         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4272                 if (current_link_up == 1 &&
4273                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4274                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4275                 else
4276                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4277         }
4278
4279         /* Without this setting the Netgear GA302T PHY does not
4280          * send/receive packets; the reason is not understood.
4281          */
4282         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4283             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4284                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4285                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4286                 udelay(80);
4287         }
4288
4289         tw32_f(MAC_MODE, tp->mac_mode);
4290         udelay(40);
4291
4292         tg3_phy_eee_adjust(tp, current_link_up);
4293
4294         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4295                 /* Polled via timer. */
4296                 tw32_f(MAC_EVENT, 0);
4297         } else {
4298                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4299         }
4300         udelay(40);
4301
4302         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4303             current_link_up == 1 &&
4304             tp->link_config.active_speed == SPEED_1000 &&
4305             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4306                 udelay(120);
4307                 tw32_f(MAC_STATUS,
4308                      (MAC_STATUS_SYNC_CHANGED |
4309                       MAC_STATUS_CFG_CHANGED));
4310                 udelay(40);
4311                 tg3_write_mem(tp,
4312                               NIC_SRAM_FIRMWARE_MBOX,
4313                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4314         }
4315
4316         /* Prevent send BD corruption. */
4317         if (tg3_flag(tp, CLKREQ_BUG)) {
4318                 u16 oldlnkctl, newlnkctl;
4319
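                /* On chips with the CLKREQ bug, CLKREQ must not be left
                 * enabled while the link runs at 10/100 Mbps or send BDs
                 * can be corrupted; re-enable it only at gigabit speed.
                 */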
4320                 pci_read_config_word(tp->pdev,
4321                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4322                                      &oldlnkctl);
4323                 if (tp->link_config.active_speed == SPEED_100 ||
4324                     tp->link_config.active_speed == SPEED_10)
4325                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4326                 else
4327                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4328                 if (newlnkctl != oldlnkctl)
4329                         pci_write_config_word(tp->pdev,
4330                                               pci_pcie_cap(tp->pdev) +
4331                                               PCI_EXP_LNKCTL, newlnkctl);
4332         }
4333
4334         if (current_link_up != netif_carrier_ok(tp->dev)) {
4335                 if (current_link_up)
4336                         netif_carrier_on(tp->dev);
4337                 else
4338                         netif_carrier_off(tp->dev);
4339                 tg3_link_report(tp);
4340         }
4341
4342         return 0;
4343 }
4344
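/* Software implementation of IEEE 802.3 clause 37 (1000BASE-X)
 * auto-negotiation, used on fiber ports when the hardware autoneg
 * engine is not in use.  The MR_* flags below mirror the management
 * variables defined by the standard.
 */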
4345 struct tg3_fiber_aneginfo {
4346         int state;
4347 #define ANEG_STATE_UNKNOWN              0
4348 #define ANEG_STATE_AN_ENABLE            1
4349 #define ANEG_STATE_RESTART_INIT         2
4350 #define ANEG_STATE_RESTART              3
4351 #define ANEG_STATE_DISABLE_LINK_OK      4
4352 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4353 #define ANEG_STATE_ABILITY_DETECT       6
4354 #define ANEG_STATE_ACK_DETECT_INIT      7
4355 #define ANEG_STATE_ACK_DETECT           8
4356 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4357 #define ANEG_STATE_COMPLETE_ACK         10
4358 #define ANEG_STATE_IDLE_DETECT_INIT     11
4359 #define ANEG_STATE_IDLE_DETECT          12
4360 #define ANEG_STATE_LINK_OK              13
4361 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4362 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4363
4364         u32 flags;
4365 #define MR_AN_ENABLE            0x00000001
4366 #define MR_RESTART_AN           0x00000002
4367 #define MR_AN_COMPLETE          0x00000004
4368 #define MR_PAGE_RX              0x00000008
4369 #define MR_NP_LOADED            0x00000010
4370 #define MR_TOGGLE_TX            0x00000020
4371 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4372 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4373 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4374 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4375 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4376 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4377 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4378 #define MR_TOGGLE_RX            0x00002000
4379 #define MR_NP_RX                0x00004000
4380
4381 #define MR_LINK_OK              0x80000000
4382
4383         unsigned long link_time, cur_time;
4384
4385         u32 ability_match_cfg;
4386         int ability_match_count;
4387
4388         char ability_match, idle_match, ack_match;
4389
4390         u32 txconfig, rxconfig;
4391 #define ANEG_CFG_NP             0x00000080
4392 #define ANEG_CFG_ACK            0x00000040
4393 #define ANEG_CFG_RF2            0x00000020
4394 #define ANEG_CFG_RF1            0x00000010
4395 #define ANEG_CFG_PS2            0x00000001
4396 #define ANEG_CFG_PS1            0x00008000
4397 #define ANEG_CFG_HD             0x00004000
4398 #define ANEG_CFG_FD             0x00002000
4399 #define ANEG_CFG_INVAL          0x00001f06
4400
4401 };
4402 #define ANEG_OK         0
4403 #define ANEG_DONE       1
4404 #define ANEG_TIMER_ENAB 2
4405 #define ANEG_FAILED     -1
4406
4407 #define ANEG_STATE_SETTLE_TIME  10000
4408
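/* One state-machine "tick" is one call to tg3_fiber_aneg_smachine();
 * fiber_autoneg() below calls it roughly once per microsecond, so
 * ANEG_STATE_SETTLE_TIME amounts to about 10 ms of settle time.
 */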
4409 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4410                                    struct tg3_fiber_aneginfo *ap)
4411 {
4412         u16 flowctrl;
4413         unsigned long delta;
4414         u32 rx_cfg_reg;
4415         int ret;
4416
4417         if (ap->state == ANEG_STATE_UNKNOWN) {
4418                 ap->rxconfig = 0;
4419                 ap->link_time = 0;
4420                 ap->cur_time = 0;
4421                 ap->ability_match_cfg = 0;
4422                 ap->ability_match_count = 0;
4423                 ap->ability_match = 0;
4424                 ap->idle_match = 0;
4425                 ap->ack_match = 0;
4426         }
4427         ap->cur_time++;
4428
4429         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4430                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4431
4432                 if (rx_cfg_reg != ap->ability_match_cfg) {
4433                         ap->ability_match_cfg = rx_cfg_reg;
4434                         ap->ability_match = 0;
4435                         ap->ability_match_count = 0;
4436                 } else {
4437                         if (++ap->ability_match_count > 1) {
4438                                 ap->ability_match = 1;
4439                                 ap->ability_match_cfg = rx_cfg_reg;
4440                         }
4441                 }
4442                 if (rx_cfg_reg & ANEG_CFG_ACK)
4443                         ap->ack_match = 1;
4444                 else
4445                         ap->ack_match = 0;
4446
4447                 ap->idle_match = 0;
4448         } else {
4449                 ap->idle_match = 1;
4450                 ap->ability_match_cfg = 0;
4451                 ap->ability_match_count = 0;
4452                 ap->ability_match = 0;
4453                 ap->ack_match = 0;
4454
4455                 rx_cfg_reg = 0;
4456         }
4457
4458         ap->rxconfig = rx_cfg_reg;
4459         ret = ANEG_OK;
4460
4461         switch (ap->state) {
4462         case ANEG_STATE_UNKNOWN:
4463                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4464                         ap->state = ANEG_STATE_AN_ENABLE;
4465
4466                 /* fallthru */
4467         case ANEG_STATE_AN_ENABLE:
4468                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4469                 if (ap->flags & MR_AN_ENABLE) {
4470                         ap->link_time = 0;
4471                         ap->cur_time = 0;
4472                         ap->ability_match_cfg = 0;
4473                         ap->ability_match_count = 0;
4474                         ap->ability_match = 0;
4475                         ap->idle_match = 0;
4476                         ap->ack_match = 0;
4477
4478                         ap->state = ANEG_STATE_RESTART_INIT;
4479                 } else {
4480                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4481                 }
4482                 break;
4483
4484         case ANEG_STATE_RESTART_INIT:
4485                 ap->link_time = ap->cur_time;
4486                 ap->flags &= ~(MR_NP_LOADED);
4487                 ap->txconfig = 0;
4488                 tw32(MAC_TX_AUTO_NEG, 0);
4489                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4490                 tw32_f(MAC_MODE, tp->mac_mode);
4491                 udelay(40);
4492
4493                 ret = ANEG_TIMER_ENAB;
4494                 ap->state = ANEG_STATE_RESTART;
4495
4496                 /* fallthru */
4497         case ANEG_STATE_RESTART:
4498                 delta = ap->cur_time - ap->link_time;
4499                 if (delta > ANEG_STATE_SETTLE_TIME)
4500                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4501                 else
4502                         ret = ANEG_TIMER_ENAB;
4503                 break;
4504
4505         case ANEG_STATE_DISABLE_LINK_OK:
4506                 ret = ANEG_DONE;
4507                 break;
4508
4509         case ANEG_STATE_ABILITY_DETECT_INIT:
4510                 ap->flags &= ~(MR_TOGGLE_TX);
4511                 ap->txconfig = ANEG_CFG_FD;
4512                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4513                 if (flowctrl & ADVERTISE_1000XPAUSE)
4514                         ap->txconfig |= ANEG_CFG_PS1;
4515                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4516                         ap->txconfig |= ANEG_CFG_PS2;
4517                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4518                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4519                 tw32_f(MAC_MODE, tp->mac_mode);
4520                 udelay(40);
4521
4522                 ap->state = ANEG_STATE_ABILITY_DETECT;
4523                 break;
4524
4525         case ANEG_STATE_ABILITY_DETECT:
4526                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4527                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4528                 break;
4529
4530         case ANEG_STATE_ACK_DETECT_INIT:
4531                 ap->txconfig |= ANEG_CFG_ACK;
4532                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4533                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4534                 tw32_f(MAC_MODE, tp->mac_mode);
4535                 udelay(40);
4536
4537                 ap->state = ANEG_STATE_ACK_DETECT;
4538
4539                 /* fallthru */
4540         case ANEG_STATE_ACK_DETECT:
4541                 if (ap->ack_match != 0) {
4542                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4543                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4544                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4545                         } else {
4546                                 ap->state = ANEG_STATE_AN_ENABLE;
4547                         }
4548                 } else if (ap->ability_match != 0 &&
4549                            ap->rxconfig == 0) {
4550                         ap->state = ANEG_STATE_AN_ENABLE;
4551                 }
4552                 break;
4553
4554         case ANEG_STATE_COMPLETE_ACK_INIT:
4555                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4556                         ret = ANEG_FAILED;
4557                         break;
4558                 }
4559                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4560                                MR_LP_ADV_HALF_DUPLEX |
4561                                MR_LP_ADV_SYM_PAUSE |
4562                                MR_LP_ADV_ASYM_PAUSE |
4563                                MR_LP_ADV_REMOTE_FAULT1 |
4564                                MR_LP_ADV_REMOTE_FAULT2 |
4565                                MR_LP_ADV_NEXT_PAGE |
4566                                MR_TOGGLE_RX |
4567                                MR_NP_RX);
4568                 if (ap->rxconfig & ANEG_CFG_FD)
4569                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4570                 if (ap->rxconfig & ANEG_CFG_HD)
4571                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4572                 if (ap->rxconfig & ANEG_CFG_PS1)
4573                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4574                 if (ap->rxconfig & ANEG_CFG_PS2)
4575                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4576                 if (ap->rxconfig & ANEG_CFG_RF1)
4577                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4578                 if (ap->rxconfig & ANEG_CFG_RF2)
4579                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4580                 if (ap->rxconfig & ANEG_CFG_NP)
4581                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4582
4583                 ap->link_time = ap->cur_time;
4584
4585                 ap->flags ^= (MR_TOGGLE_TX);
4586                 if (ap->rxconfig & 0x0008)
4587                         ap->flags |= MR_TOGGLE_RX;
4588                 if (ap->rxconfig & ANEG_CFG_NP)
4589                         ap->flags |= MR_NP_RX;
4590                 ap->flags |= MR_PAGE_RX;
4591
4592                 ap->state = ANEG_STATE_COMPLETE_ACK;
4593                 ret = ANEG_TIMER_ENAB;
4594                 break;
4595
4596         case ANEG_STATE_COMPLETE_ACK:
4597                 if (ap->ability_match != 0 &&
4598                     ap->rxconfig == 0) {
4599                         ap->state = ANEG_STATE_AN_ENABLE;
4600                         break;
4601                 }
4602                 delta = ap->cur_time - ap->link_time;
4603                 if (delta > ANEG_STATE_SETTLE_TIME) {
4604                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4605                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4606                         } else {
4607                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4608                                     !(ap->flags & MR_NP_RX)) {
4609                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4610                                 } else {
4611                                         ret = ANEG_FAILED;
4612                                 }
4613                         }
4614                 }
4615                 break;
4616
4617         case ANEG_STATE_IDLE_DETECT_INIT:
4618                 ap->link_time = ap->cur_time;
4619                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4620                 tw32_f(MAC_MODE, tp->mac_mode);
4621                 udelay(40);
4622
4623                 ap->state = ANEG_STATE_IDLE_DETECT;
4624                 ret = ANEG_TIMER_ENAB;
4625                 break;
4626
4627         case ANEG_STATE_IDLE_DETECT:
4628                 if (ap->ability_match != 0 &&
4629                     ap->rxconfig == 0) {
4630                         ap->state = ANEG_STATE_AN_ENABLE;
4631                         break;
4632                 }
4633                 delta = ap->cur_time - ap->link_time;
4634                 if (delta > ANEG_STATE_SETTLE_TIME) {
4635                         /* XXX another gem from the Broadcom driver :( */
4636                         ap->state = ANEG_STATE_LINK_OK;
4637                 }
4638                 break;
4639
4640         case ANEG_STATE_LINK_OK:
4641                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4642                 ret = ANEG_DONE;
4643                 break;
4644
4645         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4646                 /* ??? unimplemented */
4647                 break;
4648
4649         case ANEG_STATE_NEXT_PAGE_WAIT:
4650                 /* ??? unimplemented */
4651                 break;
4652
4653         default:
4654                 ret = ANEG_FAILED;
4655                 break;
4656         }
4657
4658         return ret;
4659 }
4660
4661 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4662 {
4663         int res = 0;
4664         struct tg3_fiber_aneginfo aninfo;
4665         int status = ANEG_FAILED;
4666         unsigned int tick;
4667         u32 tmp;
4668
4669         tw32_f(MAC_TX_AUTO_NEG, 0);
4670
4671         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4672         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4673         udelay(40);
4674
4675         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4676         udelay(40);
4677
4678         memset(&aninfo, 0, sizeof(aninfo));
4679         aninfo.flags |= MR_AN_ENABLE;
4680         aninfo.state = ANEG_STATE_UNKNOWN;
4681         aninfo.cur_time = 0;
4682         tick = 0;
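        /* Crank the state machine for up to ~195 ms (one udelay(1)
         * tick per iteration), or until it completes or fails.
         */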
4683         while (++tick < 195000) {
4684                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4685                 if (status == ANEG_DONE || status == ANEG_FAILED)
4686                         break;
4687
4688                 udelay(1);
4689         }
4690
4691         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4692         tw32_f(MAC_MODE, tp->mac_mode);
4693         udelay(40);
4694
4695         *txflags = aninfo.txconfig;
4696         *rxflags = aninfo.flags;
4697
4698         if (status == ANEG_DONE &&
4699             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4700                              MR_LP_ADV_FULL_DUPLEX)))
4701                 res = 1;
4702
4703         return res;
4704 }
4705
4706 static void tg3_init_bcm8002(struct tg3 *tp)
4707 {
4708         u32 mac_status = tr32(MAC_STATUS);
4709         int i;
4710
4711         /* Reset when initializing the first time or we have a link. */
4712         if (tg3_flag(tp, INIT_COMPLETE) &&
4713             !(mac_status & MAC_STATUS_PCS_SYNCED))
4714                 return;
4715
4716         /* Set PLL lock range. */
4717         tg3_writephy(tp, 0x16, 0x8007);
4718
4719         /* SW reset */
4720         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4721
4722         /* Wait for reset to complete. */
4723         /* XXX schedule_timeout() ... */
4724         for (i = 0; i < 500; i++)
4725                 udelay(10);
4726
4727         /* Config mode; select PMA/Ch 1 regs. */
4728         tg3_writephy(tp, 0x10, 0x8411);
4729
4730         /* Enable auto-lock and comdet, select txclk for tx. */
4731         tg3_writephy(tp, 0x11, 0x0a10);
4732
4733         tg3_writephy(tp, 0x18, 0x00a0);
4734         tg3_writephy(tp, 0x16, 0x41ff);
4735
4736         /* Assert and deassert POR. */
4737         tg3_writephy(tp, 0x13, 0x0400);
4738         udelay(40);
4739         tg3_writephy(tp, 0x13, 0x0000);
4740
4741         tg3_writephy(tp, 0x11, 0x0a50);
4742         udelay(40);
4743         tg3_writephy(tp, 0x11, 0x0a10);
4744
4745         /* Wait for signal to stabilize */
4746         /* XXX schedule_timeout() ... */
4747         for (i = 0; i < 15000; i++)
4748                 udelay(10);
4749
4750         /* Deselect the channel register so we can read the PHYID
4751          * later.
4752          */
4753         tg3_writephy(tp, 0x10, 0x8011);
4754 }
4755
4756 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4757 {
4758         u16 flowctrl;
4759         u32 sg_dig_ctrl, sg_dig_status;
4760         u32 serdes_cfg, expected_sg_dig_ctrl;
4761         int workaround, port_a;
4762         int current_link_up;
4763
4764         serdes_cfg = 0;
4765         expected_sg_dig_ctrl = 0;
4766         workaround = 0;
4767         port_a = 1;
4768         current_link_up = 0;
4769
4770         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4771             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4772                 workaround = 1;
4773                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4774                         port_a = 0;
4775
4776                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4777                 /* preserve bits 20-23 for voltage regulator */
4778                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4779         }
4780
4781         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4782
4783         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4784                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4785                         if (workaround) {
4786                                 u32 val = serdes_cfg;
4787
4788                                 if (port_a)
4789                                         val |= 0xc010000;
4790                                 else
4791                                         val |= 0x4010000;
4792                                 tw32_f(MAC_SERDES_CFG, val);
4793                         }
4794
4795                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4796                 }
4797                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4798                         tg3_setup_flow_control(tp, 0, 0);
4799                         current_link_up = 1;
4800                 }
4801                 goto out;
4802         }
4803
4804         /* Want auto-negotiation.  */
4805         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4806
4807         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4808         if (flowctrl & ADVERTISE_1000XPAUSE)
4809                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4810         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4811                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4812
4813         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4814                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4815                     tp->serdes_counter &&
4816                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4817                                     MAC_STATUS_RCVD_CFG)) ==
4818                      MAC_STATUS_PCS_SYNCED)) {
4819                         tp->serdes_counter--;
4820                         current_link_up = 1;
4821                         goto out;
4822                 }
4823 restart_autoneg:
4824                 if (workaround)
4825                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4826                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4827                 udelay(5);
4828                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4829
4830                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4831                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4832         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4833                                  MAC_STATUS_SIGNAL_DET)) {
4834                 sg_dig_status = tr32(SG_DIG_STATUS);
4835                 mac_status = tr32(MAC_STATUS);
4836
4837                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4838                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4839                         u32 local_adv = 0, remote_adv = 0;
4840
4841                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4842                                 local_adv |= ADVERTISE_1000XPAUSE;
4843                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4844                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4845
4846                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4847                                 remote_adv |= LPA_1000XPAUSE;
4848                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4849                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4850
4851                         tp->link_config.rmt_adv =
4852                                            mii_adv_to_ethtool_adv_x(remote_adv);
4853
4854                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4855                         current_link_up = 1;
4856                         tp->serdes_counter = 0;
4857                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4858                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4859                         if (tp->serdes_counter)
4860                                 tp->serdes_counter--;
4861                         else {
4862                                 if (workaround) {
4863                                         u32 val = serdes_cfg;
4864
4865                                         if (port_a)
4866                                                 val |= 0xc010000;
4867                                         else
4868                                                 val |= 0x4010000;
4869
4870                                         tw32_f(MAC_SERDES_CFG, val);
4871                                 }
4872
4873                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4874                                 udelay(40);
4875
4876                                 /* Link parallel detection - link is up
4877                                  * only if we have PCS_SYNC and not
4878                                  * receiving config code words.  */
4879                                 mac_status = tr32(MAC_STATUS);
4880                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4881                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4882                                         tg3_setup_flow_control(tp, 0, 0);
4883                                         current_link_up = 1;
4884                                         tp->phy_flags |=
4885                                                 TG3_PHYFLG_PARALLEL_DETECT;
4886                                         tp->serdes_counter =
4887                                                 SERDES_PARALLEL_DET_TIMEOUT;
4888                                 } else
4889                                         goto restart_autoneg;
4890                         }
4891                 }
4892         } else {
4893                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4894                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4895         }
4896
4897 out:
4898         return current_link_up;
4899 }
4900
4901 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4902 {
4903         int current_link_up = 0;
4904
4905         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4906                 goto out;
4907
4908         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4909                 u32 txflags, rxflags;
4910                 int i;
4911
4912                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4913                         u32 local_adv = 0, remote_adv = 0;
4914
4915                         if (txflags & ANEG_CFG_PS1)
4916                                 local_adv |= ADVERTISE_1000XPAUSE;
4917                         if (txflags & ANEG_CFG_PS2)
4918                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4919
4920                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4921                                 remote_adv |= LPA_1000XPAUSE;
4922                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4923                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4924
4925                         tp->link_config.rmt_adv =
4926                                            mii_adv_to_ethtool_adv_x(remote_adv);
4927
4928                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4929
4930                         current_link_up = 1;
4931                 }
4932                 for (i = 0; i < 30; i++) {
4933                         udelay(20);
4934                         tw32_f(MAC_STATUS,
4935                                (MAC_STATUS_SYNC_CHANGED |
4936                                 MAC_STATUS_CFG_CHANGED));
4937                         udelay(40);
4938                         if ((tr32(MAC_STATUS) &
4939                              (MAC_STATUS_SYNC_CHANGED |
4940                               MAC_STATUS_CFG_CHANGED)) == 0)
4941                                 break;
4942                 }
4943
4944                 mac_status = tr32(MAC_STATUS);
4945                 if (current_link_up == 0 &&
4946                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4947                     !(mac_status & MAC_STATUS_RCVD_CFG))
4948                         current_link_up = 1;
4949         } else {
4950                 tg3_setup_flow_control(tp, 0, 0);
4951
4952                 /* Forcing 1000FD link up. */
4953                 current_link_up = 1;
4954
4955                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4956                 udelay(40);
4957
4958                 tw32_f(MAC_MODE, tp->mac_mode);
4959                 udelay(40);
4960         }
4961
4962 out:
4963         return current_link_up;
4964 }
4965
4966 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4967 {
4968         u32 orig_pause_cfg;
4969         u16 orig_active_speed;
4970         u8 orig_active_duplex;
4971         u32 mac_status;
4972         int current_link_up;
4973         int i;
4974
4975         orig_pause_cfg = tp->link_config.active_flowctrl;
4976         orig_active_speed = tp->link_config.active_speed;
4977         orig_active_duplex = tp->link_config.active_duplex;
4978
4979         if (!tg3_flag(tp, HW_AUTONEG) &&
4980             netif_carrier_ok(tp->dev) &&
4981             tg3_flag(tp, INIT_COMPLETE)) {
4982                 mac_status = tr32(MAC_STATUS);
4983                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4984                                MAC_STATUS_SIGNAL_DET |
4985                                MAC_STATUS_CFG_CHANGED |
4986                                MAC_STATUS_RCVD_CFG);
4987                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4988                                    MAC_STATUS_SIGNAL_DET)) {
4989                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4990                                             MAC_STATUS_CFG_CHANGED));
4991                         return 0;
4992                 }
4993         }
4994
4995         tw32_f(MAC_TX_AUTO_NEG, 0);
4996
4997         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4998         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4999         tw32_f(MAC_MODE, tp->mac_mode);
5000         udelay(40);
5001
5002         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5003                 tg3_init_bcm8002(tp);
5004
5005         /* Enable link change event even when serdes polling.  */
5006         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5007         udelay(40);
5008
5009         current_link_up = 0;
5010         tp->link_config.rmt_adv = 0;
5011         mac_status = tr32(MAC_STATUS);
5012
5013         if (tg3_flag(tp, HW_AUTONEG))
5014                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5015         else
5016                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5017
5018         tp->napi[0].hw_status->status =
5019                 (SD_STATUS_UPDATED |
5020                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5021
5022         for (i = 0; i < 100; i++) {
5023                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5024                                     MAC_STATUS_CFG_CHANGED));
5025                 udelay(5);
5026                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5027                                          MAC_STATUS_CFG_CHANGED |
5028                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5029                         break;
5030         }
5031
5032         mac_status = tr32(MAC_STATUS);
5033         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5034                 current_link_up = 0;
5035                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5036                     tp->serdes_counter == 0) {
5037                         tw32_f(MAC_MODE, (tp->mac_mode |
5038                                           MAC_MODE_SEND_CONFIGS));
5039                         udelay(1);
5040                         tw32_f(MAC_MODE, tp->mac_mode);
5041                 }
5042         }
5043
5044         if (current_link_up == 1) {
5045                 tp->link_config.active_speed = SPEED_1000;
5046                 tp->link_config.active_duplex = DUPLEX_FULL;
5047                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5048                                     LED_CTRL_LNKLED_OVERRIDE |
5049                                     LED_CTRL_1000MBPS_ON));
5050         } else {
5051                 tp->link_config.active_speed = SPEED_UNKNOWN;
5052                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5053                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5054                                     LED_CTRL_LNKLED_OVERRIDE |
5055                                     LED_CTRL_TRAFFIC_OVERRIDE));
5056         }
5057
5058         if (current_link_up != netif_carrier_ok(tp->dev)) {
5059                 if (current_link_up)
5060                         netif_carrier_on(tp->dev);
5061                 else
5062                         netif_carrier_off(tp->dev);
5063                 tg3_link_report(tp);
5064         } else {
5065                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5066                 if (orig_pause_cfg != now_pause_cfg ||
5067                     orig_active_speed != tp->link_config.active_speed ||
5068                     orig_active_duplex != tp->link_config.active_duplex)
5069                         tg3_link_report(tp);
5070         }
5071
5072         return 0;
5073 }
5074
5075 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5076 {
5077         int current_link_up, err = 0;
5078         u32 bmsr, bmcr;
5079         u16 current_speed;
5080         u8 current_duplex;
5081         u32 local_adv, remote_adv;
5082
5083         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5084         tw32_f(MAC_MODE, tp->mac_mode);
5085         udelay(40);
5086
5087         tw32(MAC_EVENT, 0);
5088
5089         tw32_f(MAC_STATUS,
5090              (MAC_STATUS_SYNC_CHANGED |
5091               MAC_STATUS_CFG_CHANGED |
5092               MAC_STATUS_MI_COMPLETION |
5093               MAC_STATUS_LNKSTATE_CHANGED));
5094         udelay(40);
5095
5096         if (force_reset)
5097                 tg3_phy_reset(tp);
5098
5099         current_link_up = 0;
5100         current_speed = SPEED_UNKNOWN;
5101         current_duplex = DUPLEX_UNKNOWN;
5102         tp->link_config.rmt_adv = 0;
5103
5104         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5105         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5106         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5107                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5108                         bmsr |= BMSR_LSTATUS;
5109                 else
5110                         bmsr &= ~BMSR_LSTATUS;
5111         }
5112
5113         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5114
5115         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5116             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5117                 /* do nothing, just check for link up at the end */
5118         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5119                 u32 adv, newadv;
5120
5121                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5122                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5123                                  ADVERTISE_1000XPAUSE |
5124                                  ADVERTISE_1000XPSE_ASYM |
5125                                  ADVERTISE_SLCT);
5126
5127                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5128                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5129
5130                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5131                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5132                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5133                         tg3_writephy(tp, MII_BMCR, bmcr);
5134
5135                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5136                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5137                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5138
5139                         return err;
5140                 }
5141         } else {
5142                 u32 new_bmcr;
5143
5144                 bmcr &= ~BMCR_SPEED1000;
5145                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5146
5147                 if (tp->link_config.duplex == DUPLEX_FULL)
5148                         new_bmcr |= BMCR_FULLDPLX;
5149
5150                 if (new_bmcr != bmcr) {
5151                         /* BMCR_SPEED1000 is a reserved bit that needs
5152                          * to be set on write.
5153                          */
5154                         new_bmcr |= BMCR_SPEED1000;
5155
5156                         /* Force a linkdown */
5157                         if (netif_carrier_ok(tp->dev)) {
5158                                 u32 adv;
5159
5160                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5161                                 adv &= ~(ADVERTISE_1000XFULL |
5162                                          ADVERTISE_1000XHALF |
5163                                          ADVERTISE_SLCT);
5164                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5165                                 tg3_writephy(tp, MII_BMCR, bmcr |
5166                                                            BMCR_ANRESTART |
5167                                                            BMCR_ANENABLE);
5168                                 udelay(10);
5169                                 netif_carrier_off(tp->dev);
5170                         }
5171                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5172                         bmcr = new_bmcr;
5173                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5174                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5175                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5176                             ASIC_REV_5714) {
5177                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5178                                         bmsr |= BMSR_LSTATUS;
5179                                 else
5180                                         bmsr &= ~BMSR_LSTATUS;
5181                         }
5182                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5183                 }
5184         }
5185
5186         if (bmsr & BMSR_LSTATUS) {
5187                 current_speed = SPEED_1000;
5188                 current_link_up = 1;
5189                 if (bmcr & BMCR_FULLDPLX)
5190                         current_duplex = DUPLEX_FULL;
5191                 else
5192                         current_duplex = DUPLEX_HALF;
5193
5194                 local_adv = 0;
5195                 remote_adv = 0;
5196
5197                 if (bmcr & BMCR_ANENABLE) {
5198                         u32 common;
5199
5200                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5201                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5202                         common = local_adv & remote_adv;
5203                         if (common & (ADVERTISE_1000XHALF |
5204                                       ADVERTISE_1000XFULL)) {
5205                                 if (common & ADVERTISE_1000XFULL)
5206                                         current_duplex = DUPLEX_FULL;
5207                                 else
5208                                         current_duplex = DUPLEX_HALF;
5209
5210                                 tp->link_config.rmt_adv =
5211                                            mii_adv_to_ethtool_adv_x(remote_adv);
5212                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5213                                 /* Link is up via parallel detect */
5214                         } else {
5215                                 current_link_up = 0;
5216                         }
5217                 }
5218         }
5219
5220         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5221                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5222
5223         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5224         if (tp->link_config.active_duplex == DUPLEX_HALF)
5225                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5226
5227         tw32_f(MAC_MODE, tp->mac_mode);
5228         udelay(40);
5229
5230         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5231
5232         tp->link_config.active_speed = current_speed;
5233         tp->link_config.active_duplex = current_duplex;
5234
5235         if (current_link_up != netif_carrier_ok(tp->dev)) {
5236                 if (current_link_up)
5237                         netif_carrier_on(tp->dev);
5238                 else {
5239                         netif_carrier_off(tp->dev);
5240                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5241                 }
5242                 tg3_link_report(tp);
5243         }
5244         return err;
5245 }
5246
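/* Parallel detection (per IEEE 802.3 clause 37) brings the link up
 * against a partner whose auto-negotiation is disabled: if we have
 * signal but receive no config code words, force 1000/full; if config
 * words show up later, hand control back to autoneg.
 */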
5247 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5248 {
5249         if (tp->serdes_counter) {
5250                 /* Give autoneg time to complete. */
5251                 tp->serdes_counter--;
5252                 return;
5253         }
5254
5255         if (!netif_carrier_ok(tp->dev) &&
5256             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5257                 u32 bmcr;
5258
5259                 tg3_readphy(tp, MII_BMCR, &bmcr);
5260                 if (bmcr & BMCR_ANENABLE) {
5261                         u32 phy1, phy2;
5262
5263                         /* Select shadow register 0x1f */
5264                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5265                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5266
5267                         /* Select expansion interrupt status register */
5268                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5269                                          MII_TG3_DSP_EXP1_INT_STAT);
5270                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5271                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5272
5273                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5274                                 /* We have signal detect and not receiving
5275                                  * config code words, link is up by parallel
5276                                  * detection.
5277                                  */
5278
5279                                 bmcr &= ~BMCR_ANENABLE;
5280                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5281                                 tg3_writephy(tp, MII_BMCR, bmcr);
5282                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5283                         }
5284                 }
5285         } else if (netif_carrier_ok(tp->dev) &&
5286                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5287                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5288                 u32 phy2;
5289
5290                 /* Select expansion interrupt status register */
5291                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5292                                  MII_TG3_DSP_EXP1_INT_STAT);
5293                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5294                 if (phy2 & 0x20) {
5295                         u32 bmcr;
5296
5297                         /* Config code words received, turn on autoneg. */
5298                         tg3_readphy(tp, MII_BMCR, &bmcr);
5299                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5300
5301                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5302
5303                 }
5304         }
5305 }
5306
5307 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5308 {
5309         u32 val;
5310         int err;
5311
5312         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5313                 err = tg3_setup_fiber_phy(tp, force_reset);
5314         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5315                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5316         else
5317                 err = tg3_setup_copper_phy(tp, force_reset);
5318
5319         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5320                 u32 scale;
5321
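                /* Scale the GRC timer prescaler with the current MAC
                 * clock so the timer tick rate stays roughly constant
                 * across clock-speed changes.
                 */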
5322                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5323                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5324                         scale = 65;
5325                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5326                         scale = 6;
5327                 else
5328                         scale = 12;
5329
5330                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5331                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5332                 tw32(GRC_MISC_CFG, val);
5333         }
5334
5335         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5336               (6 << TX_LENGTHS_IPG_SHIFT);
5337         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5338                 val |= tr32(MAC_TX_LENGTHS) &
5339                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5340                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5341
5342         if (tp->link_config.active_speed == SPEED_1000 &&
5343             tp->link_config.active_duplex == DUPLEX_HALF)
5344                 tw32(MAC_TX_LENGTHS, val |
5345                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5346         else
5347                 tw32(MAC_TX_LENGTHS, val |
5348                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5349
5350         if (!tg3_flag(tp, 5705_PLUS)) {
5351                 if (netif_carrier_ok(tp->dev)) {
5352                         tw32(HOSTCC_STAT_COAL_TICKS,
5353                              tp->coal.stats_block_coalesce_usecs);
5354                 } else {
5355                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5356                 }
5357         }
5358
5359         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5360                 val = tr32(PCIE_PWR_MGMT_THRESH);
5361                 if (!netif_carrier_ok(tp->dev))
5362                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5363                               tp->pwrmgmt_thresh;
5364                 else
5365                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5366                 tw32(PCIE_PWR_MGMT_THRESH, val);
5367         }
5368
5369         return err;
5370 }
5371
5372 static inline int tg3_irq_sync(struct tg3 *tp)
5373 {
5374         return tp->irq_sync;
5375 }
5376
5377 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5378 {
5379         int i;
5380
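        /* Offset dst by 'off' bytes so the dump buffer is indexed by
         * register offset: tr32(off + i) lands in the matching slot.
         */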
5381         dst = (u32 *)((u8 *)dst + off);
5382         for (i = 0; i < len; i += sizeof(u32))
5383                 *dst++ = tr32(off + i);
5384 }
5385
5386 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5387 {
5388         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5389         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5390         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5391         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5392         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5393         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5394         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5395         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5396         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5397         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5398         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5399         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5400         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5401         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5402         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5403         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5404         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5405         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5406         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5407
5408         if (tg3_flag(tp, SUPPORT_MSIX))
5409                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5410
5411         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5412         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5413         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5414         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5415         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5416         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5417         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5418         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5419
5420         if (!tg3_flag(tp, 5705_PLUS)) {
5421                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5422                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5423                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5424         }
5425
5426         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5427         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5428         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5429         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5430         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5431
5432         if (tg3_flag(tp, NVRAM))
5433                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5434 }
5435
5436 static void tg3_dump_state(struct tg3 *tp)
5437 {
5438         int i;
5439         u32 *regs;
5440
5441         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5442         if (!regs) {
5443                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5444                 return;
5445         }
5446
5447         if (tg3_flag(tp, PCI_EXPRESS)) {
5448                 /* Read up to but not including private PCI registers */
5449                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5450                         regs[i / sizeof(u32)] = tr32(i);
5451         } else
5452                 tg3_dump_legacy_regs(tp, regs);
5453
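        /* Dump four registers per line, skipping groups that read
         * back as all zeros.
         */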
5454         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5455                 if (!regs[i + 0] && !regs[i + 1] &&
5456                     !regs[i + 2] && !regs[i + 3])
5457                         continue;
5458
5459                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5460                            i * 4,
5461                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5462         }
5463
5464         kfree(regs);
5465
5466         for (i = 0; i < tp->irq_cnt; i++) {
5467                 struct tg3_napi *tnapi = &tp->napi[i];
5468
5469                 /* SW status block */
5470                 netdev_err(tp->dev,
5471                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5472                            i,
5473                            tnapi->hw_status->status,
5474                            tnapi->hw_status->status_tag,
5475                            tnapi->hw_status->rx_jumbo_consumer,
5476                            tnapi->hw_status->rx_consumer,
5477                            tnapi->hw_status->rx_mini_consumer,
5478                            tnapi->hw_status->idx[0].rx_producer,
5479                            tnapi->hw_status->idx[0].tx_consumer);
5480
5481                 netdev_err(tp->dev,
5482                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5483                            i,
5484                            tnapi->last_tag, tnapi->last_irq_tag,
5485                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5486                            tnapi->rx_rcb_ptr,
5487                            tnapi->prodring.rx_std_prod_idx,
5488                            tnapi->prodring.rx_std_cons_idx,
5489                            tnapi->prodring.rx_jmb_prod_idx,
5490                            tnapi->prodring.rx_jmb_cons_idx);
5491         }
5492 }
5493
5494 /* This is called whenever we suspect that the system chipset is re-
5495  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5496  * is bogus tx completions. We try to recover by setting the
5497  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5498  * in the workqueue.
5499  */
5500 static void tg3_tx_recover(struct tg3 *tp)
5501 {
5502         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5503                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5504
5505         netdev_warn(tp->dev,
5506                     "The system may be re-ordering memory-mapped I/O "
5507                     "cycles to the network device, attempting to recover. "
5508                     "Please report the problem to the driver maintainer "
5509                     "and include system chipset information.\n");
5510
5511         spin_lock(&tp->lock);
5512         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5513         spin_unlock(&tp->lock);
5514 }
5515
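/* Count of free tx descriptors.  The index arithmetic is modulo
 * TG3_TX_RING_SIZE (a power of two), so it remains correct across
 * wraparound: e.g. with a 512-entry ring, tx_prod = 5 and
 * tx_cons = 510 give (5 - 510) & 511 = 7 descriptors in flight.
 */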
5516 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5517 {
5518         /* Tell compiler to fetch tx indices from memory. */
5519         barrier();
5520         return tnapi->tx_pending -
5521                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5522 }
5523
5524 /* Tigon3 never reports partial packet sends.  So we do not
5525  * need special logic to handle SKBs that have not had all
5526  * of their frags sent yet, like SunGEM does.
5527  */
5528 static void tg3_tx(struct tg3_napi *tnapi)
5529 {
5530         struct tg3 *tp = tnapi->tp;
5531         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5532         u32 sw_idx = tnapi->tx_cons;
5533         struct netdev_queue *txq;
5534         int index = tnapi - tp->napi;
5535         unsigned int pkts_compl = 0, bytes_compl = 0;
5536
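        /* With TSS the first NAPI context carries no tx ring, so
         * napi[i] services tx queue i - 1.
         */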
5537         if (tg3_flag(tp, ENABLE_TSS))
5538                 index--;
5539
5540         txq = netdev_get_tx_queue(tp->dev, index);
5541
5542         while (sw_idx != hw_idx) {
5543                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5544                 struct sk_buff *skb = ri->skb;
5545                 int i, tx_bug = 0;
5546
5547                 if (unlikely(skb == NULL)) {
5548                         tg3_tx_recover(tp);
5549                         return;
5550                 }
5551
5552                 pci_unmap_single(tp->pdev,
5553                                  dma_unmap_addr(ri, mapping),
5554                                  skb_headlen(skb),
5555                                  PCI_DMA_TODEVICE);
5556
5557                 ri->skb = NULL;
5558
5559                 while (ri->fragmented) {
5560                         ri->fragmented = false;
5561                         sw_idx = NEXT_TX(sw_idx);
5562                         ri = &tnapi->tx_buffers[sw_idx];
5563                 }
5564
5565                 sw_idx = NEXT_TX(sw_idx);
5566
5567                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5568                         ri = &tnapi->tx_buffers[sw_idx];
5569                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5570                                 tx_bug = 1;
5571
5572                         pci_unmap_page(tp->pdev,
5573                                        dma_unmap_addr(ri, mapping),
5574                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5575                                        PCI_DMA_TODEVICE);
5576
5577                         while (ri->fragmented) {
5578                                 ri->fragmented = false;
5579                                 sw_idx = NEXT_TX(sw_idx);
5580                                 ri = &tnapi->tx_buffers[sw_idx];
5581                         }
5582
5583                         sw_idx = NEXT_TX(sw_idx);
5584                 }
5585
5586                 pkts_compl++;
5587                 bytes_compl += skb->len;
5588
5589                 dev_kfree_skb(skb);
5590
5591                 if (unlikely(tx_bug)) {
5592                         tg3_tx_recover(tp);
5593                         return;
5594                 }
5595         }
5596
5597         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5598
5599         tnapi->tx_cons = sw_idx;
5600
5601         /* Need to make the tx_cons update visible to tg3_start_xmit()
5602          * before checking for netif_queue_stopped().  Without the
5603          * memory barrier, there is a small possibility that tg3_start_xmit()
5604          * will miss it and cause the queue to be stopped forever.
5605          */
5606         smp_mb();
5607
5608         if (unlikely(netif_tx_queue_stopped(txq) &&
5609                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5610                 __netif_tx_lock(txq, smp_processor_id());
5611                 if (netif_tx_queue_stopped(txq) &&
5612                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5613                         netif_tx_wake_queue(txq);
5614                 __netif_tx_unlock(txq);
5615         }
5616 }
5617
5618 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5619 {
5620         if (!ri->data)
5621                 return;
5622
5623         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5624                          map_sz, PCI_DMA_FROMDEVICE);
5625         kfree(ri->data);
5626         ri->data = NULL;
5627 }
5628
5629 /* Returns the size of the data buffer allocated, or < 0 on error.
5630  *
5631  * We only need to fill in the address because the other members
5632  * of the RX descriptor are invariant, see tg3_init_rings.
5633  *
5634  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5635  * posting buffers we only dirty the first cache line of the RX
5636  * descriptor (containing the address).  Whereas for the RX status
5637  * buffers the cpu only reads the last cache line of the RX descriptor
5638  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5639  */
5640 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5641                             u32 opaque_key, u32 dest_idx_unmasked)
5642 {
5643         struct tg3_rx_buffer_desc *desc;
5644         struct ring_info *map;
5645         u8 *data;
5646         dma_addr_t mapping;
5647         int skb_size, data_size, dest_idx;
5648
5649         switch (opaque_key) {
5650         case RXD_OPAQUE_RING_STD:
5651                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5652                 desc = &tpr->rx_std[dest_idx];
5653                 map = &tpr->rx_std_buffers[dest_idx];
5654                 data_size = tp->rx_pkt_map_sz;
5655                 break;
5656
5657         case RXD_OPAQUE_RING_JUMBO:
5658                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5659                 desc = &tpr->rx_jmb[dest_idx].std;
5660                 map = &tpr->rx_jmb_buffers[dest_idx];
5661                 data_size = TG3_RX_JMB_MAP_SZ;
5662                 break;
5663
5664         default:
5665                 return -EINVAL;
5666         }
5667
5668         /* Do not overwrite any of the map or rp information
5669          * until we are sure we can commit to a new buffer.
5670          *
5671          * Callers depend upon this behavior and assume that
5672          * we leave everything unchanged if we fail.
5673          */
5674         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5675                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5676         data = kmalloc(skb_size, GFP_ATOMIC);
5677         if (!data)
5678                 return -ENOMEM;
5679
5680         mapping = pci_map_single(tp->pdev,
5681                                  data + TG3_RX_OFFSET(tp),
5682                                  data_size,
5683                                  PCI_DMA_FROMDEVICE);
5684         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5685                 kfree(data);
5686                 return -EIO;
5687         }
5688
5689         map->data = data;
5690         dma_unmap_addr_set(map, mapping, mapping);
5691
5692         desc->addr_hi = ((u64)mapping >> 32);
5693         desc->addr_lo = ((u64)mapping & 0xffffffff);
5694
5695         return data_size;
5696 }
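
/* Editor's note: a worked example of the skb_size arithmetic above,
 * using assumed (illustrative) values: tp->rx_pkt_map_sz = 1536,
 * TG3_RX_OFFSET(tp) = 66, SMP_CACHE_BYTES = 64:
 *
 *      SKB_DATA_ALIGN(1536 + 66)                       = 1664
 *      SKB_DATA_ALIGN(sizeof(struct skb_shared_info))  ~ 320 (arch dependent)
 *      skb_size                                        ~ 1984
 *
 * Only data_size bytes starting at data + TG3_RX_OFFSET(tp) are DMA
 * mapped; the aligned tail is reserved so build_skb() can later place
 * the skb_shared_info there without a second allocation.
 */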
5697
5698 /* We only need to move over in the address because the other
5699  * members of the RX descriptor are invariant.  See notes above
5700  * tg3_alloc_rx_data for full details.
5701  */
5702 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5703                            struct tg3_rx_prodring_set *dpr,
5704                            u32 opaque_key, int src_idx,
5705                            u32 dest_idx_unmasked)
5706 {
5707         struct tg3 *tp = tnapi->tp;
5708         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5709         struct ring_info *src_map, *dest_map;
5710         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5711         int dest_idx;
5712
5713         switch (opaque_key) {
5714         case RXD_OPAQUE_RING_STD:
5715                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5716                 dest_desc = &dpr->rx_std[dest_idx];
5717                 dest_map = &dpr->rx_std_buffers[dest_idx];
5718                 src_desc = &spr->rx_std[src_idx];
5719                 src_map = &spr->rx_std_buffers[src_idx];
5720                 break;
5721
5722         case RXD_OPAQUE_RING_JUMBO:
5723                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5724                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5725                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5726                 src_desc = &spr->rx_jmb[src_idx].std;
5727                 src_map = &spr->rx_jmb_buffers[src_idx];
5728                 break;
5729
5730         default:
5731                 return;
5732         }
5733
5734         dest_map->data = src_map->data;
5735         dma_unmap_addr_set(dest_map, mapping,
5736                            dma_unmap_addr(src_map, mapping));
5737         dest_desc->addr_hi = src_desc->addr_hi;
5738         dest_desc->addr_lo = src_desc->addr_lo;
5739
5740         /* Ensure that the update to the skb happens after the physical
5741          * addresses have been transferred to the new BD location.
5742          */
5743         smp_wmb();
5744
5745         src_map->data = NULL;
5746 }
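
/* Editor's note: the smp_wmb() above pairs with the second smp_rmb() in
 * tg3_rx_prodring_xfer().  Clearing src_map->data is what marks the
 * source slot as reclaimable, so it must be ordered after the copy of
 * the data pointer, mapping and address fields; once the transfer code
 * observes the NULL, the recycled destination entry is guaranteed to be
 * fully written.
 */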
5747
5748 /* The RX ring scheme is composed of multiple rings which post fresh
5749  * buffers to the chip, and one special ring the chip uses to report
5750  * status back to the host.
5751  *
5752  * The special ring reports the status of received packets to the
5753  * host.  The chip does not write into the original descriptor the
5754  * RX buffer was obtained from.  The chip simply takes the original
5755  * descriptor as provided by the host, updates the status and length
5756  * field, then writes this into the next status ring entry.
5757  *
5758  * Each ring the host uses to post buffers to the chip is described
5759  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5760  * it is first placed into the on-chip ram.  When the packet's length
5761  * is known, it walks down the TG3_BDINFO entries to select the ring.
5762  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5763  * which is within the range of the new packet's length is chosen.
5764  *
5765  * The "separate ring for rx status" scheme may sound queer, but it makes
5766  * sense from a cache coherency perspective.  If only the host writes
5767  * to the buffer post rings, and only the chip writes to the rx status
5768  * rings, then cache lines never move beyond shared-modified state.
5769  * If both the host and chip were to write into the same ring, cache line
5770  * eviction could occur since both entities want it in an exclusive state.
5771  */
5772 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5773 {
5774         struct tg3 *tp = tnapi->tp;
5775         u32 work_mask, rx_std_posted = 0;
5776         u32 std_prod_idx, jmb_prod_idx;
5777         u32 sw_idx = tnapi->rx_rcb_ptr;
5778         u16 hw_idx;
5779         int received;
5780         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5781
5782         hw_idx = *(tnapi->rx_rcb_prod_idx);
5783         /*
5784          * We need to order the read of hw_idx and the read of
5785          * the opaque cookie.
5786          */
5787         rmb();
5788         work_mask = 0;
5789         received = 0;
5790         std_prod_idx = tpr->rx_std_prod_idx;
5791         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5792         while (sw_idx != hw_idx && budget > 0) {
5793                 struct ring_info *ri;
5794                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5795                 unsigned int len;
5796                 struct sk_buff *skb;
5797                 dma_addr_t dma_addr;
5798                 u32 opaque_key, desc_idx, *post_ptr;
5799                 u8 *data;
5800
5801                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5802                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5803                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5804                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5805                         dma_addr = dma_unmap_addr(ri, mapping);
5806                         data = ri->data;
5807                         post_ptr = &std_prod_idx;
5808                         rx_std_posted++;
5809                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5810                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5811                         dma_addr = dma_unmap_addr(ri, mapping);
5812                         data = ri->data;
5813                         post_ptr = &jmb_prod_idx;
5814                 } else
5815                         goto next_pkt_nopost;
5816
5817                 work_mask |= opaque_key;
5818
5819                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5820                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5821                 drop_it:
5822                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5823                                        desc_idx, *post_ptr);
5824                 drop_it_no_recycle:
5825                         /* Other statistics are tracked by the card. */
5826                         tp->rx_dropped++;
5827                         goto next_pkt;
5828                 }
5829
5830                 prefetch(data + TG3_RX_OFFSET(tp));
5831                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5832                       ETH_FCS_LEN;
5833
5834                 if (len > TG3_RX_COPY_THRESH(tp)) {
5835                         int skb_size;
5836
5837                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5838                                                     *post_ptr);
5839                         if (skb_size < 0)
5840                                 goto drop_it;
5841
5842                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5843                                          PCI_DMA_FROMDEVICE);
5844
5845                         skb = build_skb(data);
5846                         if (!skb) {
5847                                 kfree(data);
5848                                 goto drop_it_no_recycle;
5849                         }
5850                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5851                         /* Ensure that the update to the data happens
5852                          * after the usage of the old DMA mapping.
5853                          */
5854                         smp_wmb();
5855
5856                         ri->data = NULL;
5857
5858                 } else {
5859                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5860                                        desc_idx, *post_ptr);
5861
5862                         skb = netdev_alloc_skb(tp->dev,
5863                                                len + TG3_RAW_IP_ALIGN);
5864                         if (skb == NULL)
5865                                 goto drop_it_no_recycle;
5866
5867                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
5868                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len,
                                                         PCI_DMA_FROMDEVICE);
5869                         memcpy(skb->data,
5870                                data + TG3_RX_OFFSET(tp),
5871                                len);
5872                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len,
                                                            PCI_DMA_FROMDEVICE);
5873                 }
5874
5875                 skb_put(skb, len);
5876                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5877                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5878                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5879                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5880                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5881                 else
5882                         skb_checksum_none_assert(skb);
5883
5884                 skb->protocol = eth_type_trans(skb, tp->dev);
5885
5886                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5887                     skb->protocol != htons(ETH_P_8021Q)) {
5888                         dev_kfree_skb(skb);
5889                         goto drop_it_no_recycle;
5890                 }
5891
5892                 if (desc->type_flags & RXD_FLAG_VLAN &&
5893                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5894                         __vlan_hwaccel_put_tag(skb,
5895                                                desc->err_vlan & RXD_VLAN_MASK);
5896
5897                 napi_gro_receive(&tnapi->napi, skb);
5898
5899                 received++;
5900                 budget--;
5901
5902 next_pkt:
5903                 (*post_ptr)++;
5904
5905                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5906                         tpr->rx_std_prod_idx = std_prod_idx &
5907                                                tp->rx_std_ring_mask;
5908                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5909                                      tpr->rx_std_prod_idx);
5910                         work_mask &= ~RXD_OPAQUE_RING_STD;
5911                         rx_std_posted = 0;
5912                 }
5913 next_pkt_nopost:
5914                 sw_idx++;
5915                 sw_idx &= tp->rx_ret_ring_mask;
5916
5917                 /* Refresh hw_idx to see if there is new work */
5918                 if (sw_idx == hw_idx) {
5919                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5920                         rmb();
5921                 }
5922         }
5923
5924         /* ACK the status ring. */
5925         tnapi->rx_rcb_ptr = sw_idx;
5926         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5927
5928         /* Refill RX ring(s). */
5929         if (!tg3_flag(tp, ENABLE_RSS)) {
5930                 /* Sync BD data before updating mailbox */
5931                 wmb();
5932
5933                 if (work_mask & RXD_OPAQUE_RING_STD) {
5934                         tpr->rx_std_prod_idx = std_prod_idx &
5935                                                tp->rx_std_ring_mask;
5936                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5937                                      tpr->rx_std_prod_idx);
5938                 }
5939                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5940                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5941                                                tp->rx_jmb_ring_mask;
5942                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5943                                      tpr->rx_jmb_prod_idx);
5944                 }
5945                 mmiowb();
5946         } else if (work_mask) {
5947                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5948                  * updated before the producer indices can be updated.
5949                  */
5950                 smp_wmb();
5951
5952                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5953                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5954
5955                 if (tnapi != &tp->napi[1]) {
5956                         tp->rx_refill = true;
5957                         napi_schedule(&tp->napi[1].napi);
5958                 }
5959         }
5960
5961         return received;
5962 }
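
/* Editor's note: tg3_rx() above follows the classic status-ring
 * consumption pattern described in the block comment before it.  A
 * stripped-down sketch of just the index handling, with hypothetical
 * names (struct ring, process_descriptor() etc. are not driver code):
 */
#if 0
static int status_ring_poll(struct ring *r, int budget)
{
        int received = 0;
        u16 hw = *r->hw_prod_ptr;       /* chip-owned producer index */

        rmb();                          /* index read before descriptor reads */
        while (r->cons != hw && received < budget) {
                process_descriptor(&r->desc[r->cons]);
                r->cons = (r->cons + 1) & r->mask;
                received++;
                if (r->cons == hw) {    /* caught up; re-sample producer */
                        hw = *r->hw_prod_ptr;
                        rmb();
                }
        }
        writel(r->cons, r->cons_mbox);  /* ack processed entries to chip */
        return received;
}
#endif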
5963
5964 static void tg3_poll_link(struct tg3 *tp)
5965 {
5966         /* handle link change and other phy events */
5967         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5968                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5969
5970                 if (sblk->status & SD_STATUS_LINK_CHG) {
5971                         sblk->status = SD_STATUS_UPDATED |
5972                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5973                         spin_lock(&tp->lock);
5974                         if (tg3_flag(tp, USE_PHYLIB)) {
5975                                 tw32_f(MAC_STATUS,
5976                                      (MAC_STATUS_SYNC_CHANGED |
5977                                       MAC_STATUS_CFG_CHANGED |
5978                                       MAC_STATUS_MI_COMPLETION |
5979                                       MAC_STATUS_LNKSTATE_CHANGED));
5980                                 udelay(40);
5981                         } else
5982                                 tg3_setup_phy(tp, 0);
5983                         spin_unlock(&tp->lock);
5984                 }
5985         }
5986 }
5987
5988 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5989                                 struct tg3_rx_prodring_set *dpr,
5990                                 struct tg3_rx_prodring_set *spr)
5991 {
5992         u32 si, di, cpycnt, src_prod_idx;
5993         int i, err = 0;
5994
5995         while (1) {
5996                 src_prod_idx = spr->rx_std_prod_idx;
5997
5998                 /* Make sure updates to the rx_std_buffers[] entries and the
5999                  * standard producer index are seen in the correct order.
6000                  */
6001                 smp_rmb();
6002
6003                 if (spr->rx_std_cons_idx == src_prod_idx)
6004                         break;
6005
6006                 if (spr->rx_std_cons_idx < src_prod_idx)
6007                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6008                 else
6009                         cpycnt = tp->rx_std_ring_mask + 1 -
6010                                  spr->rx_std_cons_idx;
6011
6012                 cpycnt = min(cpycnt,
6013                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6014
6015                 si = spr->rx_std_cons_idx;
6016                 di = dpr->rx_std_prod_idx;
6017
6018                 for (i = di; i < di + cpycnt; i++) {
6019                         if (dpr->rx_std_buffers[i].data) {
6020                                 cpycnt = i - di;
6021                                 err = -ENOSPC;
6022                                 break;
6023                         }
6024                 }
6025
6026                 if (!cpycnt)
6027                         break;
6028
6029                 /* Ensure that updates to the rx_std_buffers ring and the
6030                  * shadowed hardware producer ring from tg3_recycle_skb() are
6031                  * ordered correctly WRT the skb check above.
6032                  */
6033                 smp_rmb();
6034
6035                 memcpy(&dpr->rx_std_buffers[di],
6036                        &spr->rx_std_buffers[si],
6037                        cpycnt * sizeof(struct ring_info));
6038
6039                 for (i = 0; i < cpycnt; i++, di++, si++) {
6040                         struct tg3_rx_buffer_desc *sbd, *dbd;
6041                         sbd = &spr->rx_std[si];
6042                         dbd = &dpr->rx_std[di];
6043                         dbd->addr_hi = sbd->addr_hi;
6044                         dbd->addr_lo = sbd->addr_lo;
6045                 }
6046
6047                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6048                                        tp->rx_std_ring_mask;
6049                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6050                                        tp->rx_std_ring_mask;
6051         }
6052
6053         while (1) {
6054                 src_prod_idx = spr->rx_jmb_prod_idx;
6055
6056                 /* Make sure updates to the rx_jmb_buffers[] entries and
6057                  * the jumbo producer index are seen in the correct order.
6058                  */
6059                 smp_rmb();
6060
6061                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6062                         break;
6063
6064                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6065                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6066                 else
6067                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6068                                  spr->rx_jmb_cons_idx;
6069
6070                 cpycnt = min(cpycnt,
6071                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6072
6073                 si = spr->rx_jmb_cons_idx;
6074                 di = dpr->rx_jmb_prod_idx;
6075
6076                 for (i = di; i < di + cpycnt; i++) {
6077                         if (dpr->rx_jmb_buffers[i].data) {
6078                                 cpycnt = i - di;
6079                                 err = -ENOSPC;
6080                                 break;
6081                         }
6082                 }
6083
6084                 if (!cpycnt)
6085                         break;
6086
6087                 /* Ensure that updates to the rx_jmb_buffers ring and the
6088                  * shadowed hardware producer ring from tg3_recycle_skb() are
6089                  * ordered correctly WRT the skb check above.
6090                  */
6091                 smp_rmb();
6092
6093                 memcpy(&dpr->rx_jmb_buffers[di],
6094                        &spr->rx_jmb_buffers[si],
6095                        cpycnt * sizeof(struct ring_info));
6096
6097                 for (i = 0; i < cpycnt; i++, di++, si++) {
6098                         struct tg3_rx_buffer_desc *sbd, *dbd;
6099                         sbd = &spr->rx_jmb[si].std;
6100                         dbd = &dpr->rx_jmb[di].std;
6101                         dbd->addr_hi = sbd->addr_hi;
6102                         dbd->addr_lo = sbd->addr_lo;
6103                 }
6104
6105                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6106                                        tp->rx_jmb_ring_mask;
6107                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6108                                        tp->rx_jmb_ring_mask;
6109         }
6110
6111         return err;
6112 }
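
/* Editor's note: a worked example of the cpycnt arithmetic above, with
 * illustrative numbers.  Assume a 512-entry ring (mask = 511), source
 * consumer at 500 and source producer at 20, i.e. the producer has
 * wrapped:
 *
 *      cons (500) > prod (20), so
 *      cpycnt = mask + 1 - cons = 512 - 500 = 12
 *
 * Only the 12 entries up to the end of the ring are copied on this
 * pass; the outer while (1) loop comes around with cons = 0 and copies
 * the remaining 20.  The extra clamp against the destination producer
 * keeps each memcpy() linear in both rings.
 */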
6113
6114 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6115 {
6116         struct tg3 *tp = tnapi->tp;
6117
6118         /* run TX completion thread */
6119         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6120                 tg3_tx(tnapi);
6121                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6122                         return work_done;
6123         }
6124
6125         /* run RX thread, within the bounds set by NAPI.
6126          * All RX "locking" is done by ensuring outside
6127          * code synchronizes with tg3->napi.poll()
6128          */
6129         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6130                 work_done += tg3_rx(tnapi, budget - work_done);
6131
6132         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6133                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6134                 int i, err = 0;
6135                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6136                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6137
6138                 tp->rx_refill = false;
6139                 for (i = 1; i < tp->irq_cnt; i++)
6140                         err |= tg3_rx_prodring_xfer(tp, dpr,
6141                                                     &tp->napi[i].prodring);
6142
6143                 wmb();
6144
6145                 if (std_prod_idx != dpr->rx_std_prod_idx)
6146                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6147                                      dpr->rx_std_prod_idx);
6148
6149                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6150                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6151                                      dpr->rx_jmb_prod_idx);
6152
6153                 mmiowb();
6154
6155                 if (err)
6156                         tw32_f(HOSTCC_MODE, tp->coal_now);
6157         }
6158
6159         return work_done;
6160 }
6161
6162 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6163 {
6164         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6165                 schedule_work(&tp->reset_task);
6166 }
6167
6168 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6169 {
6170         cancel_work_sync(&tp->reset_task);
6171         tg3_flag_clear(tp, RESET_TASK_PENDING);
6172         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6173 }
6174
6175 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6176 {
6177         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6178         struct tg3 *tp = tnapi->tp;
6179         int work_done = 0;
6180         struct tg3_hw_status *sblk = tnapi->hw_status;
6181
6182         while (1) {
6183                 work_done = tg3_poll_work(tnapi, work_done, budget);
6184
6185                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6186                         goto tx_recovery;
6187
6188                 if (unlikely(work_done >= budget))
6189                         break;
6190
6191                 /* tnapi->last_tag is used in the interrupt mailbox write below
6192                  * to tell the hw how much work has been processed,
6193                  * so we must read it before checking for more work.
6194                  */
6195                 tnapi->last_tag = sblk->status_tag;
6196                 tnapi->last_irq_tag = tnapi->last_tag;
6197                 rmb();
6198
6199                 /* check for RX/TX work to do */
6200                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6201                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6202
6203                         /* This test is not race-free, but looping again
6204                          * reduces the number of interrupts.
6205                          */
6206                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6207                                 continue;
6208
6209                         napi_complete(napi);
6210                         /* Reenable interrupts. */
6211                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6212
6213                         /* This test is synchronized by napi_schedule()
6214                          * and napi_complete() to close the race condition.
6215                          */
6216                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6217                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6218                                                   HOSTCC_MODE_ENABLE |
6219                                                   tnapi->coal_now);
6220                         }
6221                         mmiowb();
6222                         break;
6223                 }
6224         }
6225
6226         return work_done;
6227
6228 tx_recovery:
6229         /* work_done is guaranteed to be less than budget. */
6230         napi_complete(napi);
6231         tg3_reset_task_schedule(tp);
6232         return work_done;
6233 }
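
/* Editor's note: on the "last_tag << 24" write above -- in tagged-status
 * mode the chip stamps each status block update with an 8-bit tag, and
 * the host echoes the tag it has processed in bits 31:24 of the
 * interrupt mailbox when re-enabling interrupts.  If the chip has since
 * produced a newer tag it re-asserts the interrupt at once, so no status
 * update can be lost in the napi_complete() window.
 */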
6234
6235 static void tg3_process_error(struct tg3 *tp)
6236 {
6237         u32 val;
6238         bool real_error = false;
6239
6240         if (tg3_flag(tp, ERROR_PROCESSED))
6241                 return;
6242
6243         /* Check Flow Attention register */
6244         val = tr32(HOSTCC_FLOW_ATTN);
6245         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6246                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6247                 real_error = true;
6248         }
6249
6250         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6251                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6252                 real_error = true;
6253         }
6254
6255         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6256                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6257                 real_error = true;
6258         }
6259
6260         if (!real_error)
6261                 return;
6262
6263         tg3_dump_state(tp);
6264
6265         tg3_flag_set(tp, ERROR_PROCESSED);
6266         tg3_reset_task_schedule(tp);
6267 }
6268
6269 static int tg3_poll(struct napi_struct *napi, int budget)
6270 {
6271         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6272         struct tg3 *tp = tnapi->tp;
6273         int work_done = 0;
6274         struct tg3_hw_status *sblk = tnapi->hw_status;
6275
6276         while (1) {
6277                 if (sblk->status & SD_STATUS_ERROR)
6278                         tg3_process_error(tp);
6279
6280                 tg3_poll_link(tp);
6281
6282                 work_done = tg3_poll_work(tnapi, work_done, budget);
6283
6284                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6285                         goto tx_recovery;
6286
6287                 if (unlikely(work_done >= budget))
6288                         break;
6289
6290                 if (tg3_flag(tp, TAGGED_STATUS)) {
6291                         /* tnapi->last_tag is used in tg3_int_reenable() below
6292                          * to tell the hw how much work has been processed,
6293                          * so we must read it before checking for more work.
6294                          */
6295                         tnapi->last_tag = sblk->status_tag;
6296                         tnapi->last_irq_tag = tnapi->last_tag;
6297                         rmb();
6298                 } else
6299                         sblk->status &= ~SD_STATUS_UPDATED;
6300
6301                 if (likely(!tg3_has_work(tnapi))) {
6302                         napi_complete(napi);
6303                         tg3_int_reenable(tnapi);
6304                         break;
6305                 }
6306         }
6307
6308         return work_done;
6309
6310 tx_recovery:
6311         /* work_done is guaranteed to be less than budget. */
6312         napi_complete(napi);
6313         tg3_reset_task_schedule(tp);
6314         return work_done;
6315 }
6316
6317 static void tg3_napi_disable(struct tg3 *tp)
6318 {
6319         int i;
6320
6321         for (i = tp->irq_cnt - 1; i >= 0; i--)
6322                 napi_disable(&tp->napi[i].napi);
6323 }
6324
6325 static void tg3_napi_enable(struct tg3 *tp)
6326 {
6327         int i;
6328
6329         for (i = 0; i < tp->irq_cnt; i++)
6330                 napi_enable(&tp->napi[i].napi);
6331 }
6332
6333 static void tg3_napi_init(struct tg3 *tp)
6334 {
6335         int i;
6336
6337         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6338         for (i = 1; i < tp->irq_cnt; i++)
6339                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6340 }
6341
6342 static void tg3_napi_fini(struct tg3 *tp)
6343 {
6344         int i;
6345
6346         for (i = 0; i < tp->irq_cnt; i++)
6347                 netif_napi_del(&tp->napi[i].napi);
6348 }
6349
6350 static inline void tg3_netif_stop(struct tg3 *tp)
6351 {
6352         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6353         tg3_napi_disable(tp);
6354         netif_tx_disable(tp->dev);
6355 }
6356
6357 static inline void tg3_netif_start(struct tg3 *tp)
6358 {
6359         /* NOTE: unconditional netif_tx_wake_all_queues is only
6360          * appropriate so long as all callers are assured to
6361          * appropriate so long as all callers are guaranteed to
6362          */
6363         netif_tx_wake_all_queues(tp->dev);
6364
6365         tg3_napi_enable(tp);
6366         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6367         tg3_enable_ints(tp);
6368 }
6369
6370 static void tg3_irq_quiesce(struct tg3 *tp)
6371 {
6372         int i;
6373
6374         BUG_ON(tp->irq_sync);
6375
6376         tp->irq_sync = 1;
6377         smp_mb();
6378
6379         for (i = 0; i < tp->irq_cnt; i++)
6380                 synchronize_irq(tp->napi[i].irq_vec);
6381 }
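
/* Editor's note: tg3_irq_quiesce() is the usual two-step IRQ quiesce:
 * set the flag that the hot paths test via tg3_irq_sync(), make the
 * store visible with smp_mb(), then synchronize_irq() on every vector
 * so that any handler already executing on another CPU has returned
 * before we proceed.  From then on no handler will schedule NAPI until
 * irq_sync is cleared again.
 */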
6382
6383 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6384  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6385  * with as well.  Most of the time, this is not necessary except when
6386  * shutting down the device.
6387  */
6388 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6389 {
6390         spin_lock_bh(&tp->lock);
6391         if (irq_sync)
6392                 tg3_irq_quiesce(tp);
6393 }
6394
6395 static inline void tg3_full_unlock(struct tg3 *tp)
6396 {
6397         spin_unlock_bh(&tp->lock);
6398 }
6399
6400 /* One-shot MSI handler - Chip automatically disables interrupt
6401  * after sending MSI so driver doesn't have to do it.
6402  */
6403 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6404 {
6405         struct tg3_napi *tnapi = dev_id;
6406         struct tg3 *tp = tnapi->tp;
6407
6408         prefetch(tnapi->hw_status);
6409         if (tnapi->rx_rcb)
6410                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6411
6412         if (likely(!tg3_irq_sync(tp)))
6413                 napi_schedule(&tnapi->napi);
6414
6415         return IRQ_HANDLED;
6416 }
6417
6418 /* MSI ISR - No need to check for interrupt sharing and no need to
6419  * flush status block and interrupt mailbox. PCI ordering rules
6420  * guarantee that MSI will arrive after the status block.
6421  */
6422 static irqreturn_t tg3_msi(int irq, void *dev_id)
6423 {
6424         struct tg3_napi *tnapi = dev_id;
6425         struct tg3 *tp = tnapi->tp;
6426
6427         prefetch(tnapi->hw_status);
6428         if (tnapi->rx_rcb)
6429                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6430         /*
6431          * Writing any value to intr-mbox-0 clears PCI INTA# and
6432          * chip-internal interrupt pending events.
6433          * Writing non-zero to intr-mbox-0 additionally tells the
6434          * NIC to stop sending us irqs, engaging "in-intr-handler"
6435          * event coalescing.
6436          */
6437         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6438         if (likely(!tg3_irq_sync(tp)))
6439                 napi_schedule(&tnapi->napi);
6440
6441         return IRQ_RETVAL(1);
6442 }
6443
6444 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6445 {
6446         struct tg3_napi *tnapi = dev_id;
6447         struct tg3 *tp = tnapi->tp;
6448         struct tg3_hw_status *sblk = tnapi->hw_status;
6449         unsigned int handled = 1;
6450
6451         /* In INTx mode, it is possible for the interrupt to arrive at
6452          * the CPU before the status block that was posted just before it.
6453          * Reading the PCI State register will confirm whether the
6454          * interrupt is ours and will flush the status block.
6455          */
6456         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6457                 if (tg3_flag(tp, CHIP_RESETTING) ||
6458                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6459                         handled = 0;
6460                         goto out;
6461                 }
6462         }
6463
6464         /*
6465          * Writing any value to intr-mbox-0 clears PCI INTA# and
6466          * chip-internal interrupt pending events.
6467          * Writing non-zero to intr-mbox-0 additionally tells the
6468          * NIC to stop sending us irqs, engaging "in-intr-handler"
6469          * event coalescing.
6470          *
6471          * Flush the mailbox to de-assert the IRQ immediately to prevent
6472          * spurious interrupts.  The flush impacts performance but
6473          * excessive spurious interrupts can be worse in some cases.
6474          */
6475         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6476         if (tg3_irq_sync(tp))
6477                 goto out;
6478         sblk->status &= ~SD_STATUS_UPDATED;
6479         if (likely(tg3_has_work(tnapi))) {
6480                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6481                 napi_schedule(&tnapi->napi);
6482         } else {
6483                 /* No work, shared interrupt perhaps?  re-enable
6484                  * interrupts, and flush that PCI write
6485                  */
6486                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6487                                0x00000000);
6488         }
6489 out:
6490         return IRQ_RETVAL(handled);
6491 }
6492
6493 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6494 {
6495         struct tg3_napi *tnapi = dev_id;
6496         struct tg3 *tp = tnapi->tp;
6497         struct tg3_hw_status *sblk = tnapi->hw_status;
6498         unsigned int handled = 1;
6499
6500         /* In INTx mode, it is possible for the interrupt to arrive at
6501          * the CPU before the status block that was posted just before it.
6502          * Reading the PCI State register will confirm whether the
6503          * interrupt is ours and will flush the status block.
6504          */
6505         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6506                 if (tg3_flag(tp, CHIP_RESETTING) ||
6507                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6508                         handled = 0;
6509                         goto out;
6510                 }
6511         }
6512
6513         /*
6514          * Writing any value to intr-mbox-0 clears PCI INTA# and
6515          * chip-internal interrupt pending events.
6516          * Writing non-zero to intr-mbox-0 additionally tells the
6517          * NIC to stop sending us irqs, engaging "in-intr-handler"
6518          * event coalescing.
6519          *
6520          * Flush the mailbox to de-assert the IRQ immediately to prevent
6521          * spurious interrupts.  The flush impacts performance but
6522          * excessive spurious interrupts can be worse in some cases.
6523          */
6524         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6525
6526         /*
6527          * In a shared interrupt configuration, sometimes other devices'
6528          * interrupts will scream.  We record the current status tag here
6529          * so that the above check can report that the screaming interrupts
6530          * are unhandled.  Eventually they will be silenced.
6531          */
6532         tnapi->last_irq_tag = sblk->status_tag;
6533
6534         if (tg3_irq_sync(tp))
6535                 goto out;
6536
6537         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6538
6539         napi_schedule(&tnapi->napi);
6540
6541 out:
6542         return IRQ_RETVAL(handled);
6543 }
6544
6545 /* ISR for interrupt test */
6546 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6547 {
6548         struct tg3_napi *tnapi = dev_id;
6549         struct tg3 *tp = tnapi->tp;
6550         struct tg3_hw_status *sblk = tnapi->hw_status;
6551
6552         if ((sblk->status & SD_STATUS_UPDATED) ||
6553             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6554                 tg3_disable_ints(tp);
6555                 return IRQ_RETVAL(1);
6556         }
6557         return IRQ_RETVAL(0);
6558 }
6559
6560 #ifdef CONFIG_NET_POLL_CONTROLLER
6561 static void tg3_poll_controller(struct net_device *dev)
6562 {
6563         int i;
6564         struct tg3 *tp = netdev_priv(dev);
6565
6566         for (i = 0; i < tp->irq_cnt; i++)
6567                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6568 }
6569 #endif
6570
6571 static void tg3_tx_timeout(struct net_device *dev)
6572 {
6573         struct tg3 *tp = netdev_priv(dev);
6574
6575         if (netif_msg_tx_err(tp)) {
6576                 netdev_err(dev, "transmit timed out, resetting\n");
6577                 tg3_dump_state(tp);
6578         }
6579
6580         tg3_reset_task_schedule(tp);
6581 }
6582
6583 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6584 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6585 {
6586         u32 base = (u32) mapping & 0xffffffff;
6587
6588         return (base > 0xffffdcc0) && (base + len + 8 < base);
6589 }
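
/* Editor's note: a worked example of the test above with an assumed
 * mapping.  Take a buffer at DMA address 0x1fffff000 with len = 0x2000:
 *
 *      base           = 0xfffff000     (low 32 bits)
 *      base + len + 8 = 0x00001008     (wrapped, so < base)
 *
 * Both conditions hold, so this buffer straddles the 8GB boundary and
 * must be worked around.  Only the low 32 bits matter: any buffer whose
 * low word plus length wraps past zero crosses *some* 4GB boundary,
 * whatever the upper address bits are.  The 0xffffdcc0 cutoff and the
 * "+ 8" slack are conservative margins for the hardware erratum.
 */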
6590
6591 /* Test for DMA addresses > 40-bit */
6592 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6593                                           int len)
6594 {
6595 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6596         if (tg3_flag(tp, 40BIT_DMA_BUG))
6597                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6598         return 0;
6599 #else
6600         return 0;
6601 #endif
6602 }
6603
6604 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6605                                  dma_addr_t mapping, u32 len, u32 flags,
6606                                  u32 mss, u32 vlan)
6607 {
6608         txbd->addr_hi = ((u64) mapping >> 32);
6609         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6610         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6611         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6612 }
6613
6614 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6615                             dma_addr_t map, u32 len, u32 flags,
6616                             u32 mss, u32 vlan)
6617 {
6618         struct tg3 *tp = tnapi->tp;
6619         bool hwbug = false;
6620
6621         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6622                 hwbug = true;
6623
6624         if (tg3_4g_overflow_test(map, len))
6625                 hwbug = true;
6626
6627         if (tg3_40bit_overflow_test(tp, map, len))
6628                 hwbug = true;
6629
6630         if (tp->dma_limit) {
6631                 u32 prvidx = *entry;
6632                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6633                 while (len > tp->dma_limit && *budget) {
6634                         u32 frag_len = tp->dma_limit;
6635                         len -= tp->dma_limit;
6636
6637                         /* Avoid the 8-byte DMA problem */
6638                         if (len <= 8) {
6639                                 len += tp->dma_limit / 2;
6640                                 frag_len = tp->dma_limit / 2;
6641                         }
6642
6643                         tnapi->tx_buffers[*entry].fragmented = true;
6644
6645                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6646                                       frag_len, tmp_flag, mss, vlan);
6647                         *budget -= 1;
6648                         prvidx = *entry;
6649                         *entry = NEXT_TX(*entry);
6650
6651                         map += frag_len;
6652                 }
6653
6654                 if (len) {
6655                         if (*budget) {
6656                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6657                                               len, flags, mss, vlan);
6658                                 *budget -= 1;
6659                                 *entry = NEXT_TX(*entry);
6660                         } else {
6661                                 hwbug = true;
6662                                 tnapi->tx_buffers[prvidx].fragmented = false;
6663                         }
6664                 }
6665         } else {
6666                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6667                               len, flags, mss, vlan);
6668                 *entry = NEXT_TX(*entry);
6669         }
6670
6671         return hwbug;
6672 }
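
/* Editor's note: a worked example of the dma_limit splitting above,
 * with illustrative numbers.  Assume tp->dma_limit = 4096 and an
 * 8200-byte fragment:
 *
 *      pass 1: frag_len 4096, remaining len 4104
 *      pass 2: 4104 - 4096 would leave 8, tripping the 8-byte rule, so
 *              frag_len drops to 2048 and the remainder becomes 2056
 *      tail:   a final 2056-byte BD carries the caller's TXD_FLAG_END
 *
 * Each intermediate BD is marked ->fragmented so the unmap path
 * (tg3_tx_skb_unmap()) knows to walk past the extra descriptors that
 * map the same skb fragment.
 */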
6673
6674 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6675 {
6676         int i;
6677         struct sk_buff *skb;
6678         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6679
6680         skb = txb->skb;
6681         txb->skb = NULL;
6682
6683         pci_unmap_single(tnapi->tp->pdev,
6684                          dma_unmap_addr(txb, mapping),
6685                          skb_headlen(skb),
6686                          PCI_DMA_TODEVICE);
6687
6688         while (txb->fragmented) {
6689                 txb->fragmented = false;
6690                 entry = NEXT_TX(entry);
6691                 txb = &tnapi->tx_buffers[entry];
6692         }
6693
6694         for (i = 0; i <= last; i++) {
6695                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6696
6697                 entry = NEXT_TX(entry);
6698                 txb = &tnapi->tx_buffers[entry];
6699
6700                 pci_unmap_page(tnapi->tp->pdev,
6701                                dma_unmap_addr(txb, mapping),
6702                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6703
6704                 while (txb->fragmented) {
6705                         txb->fragmented = false;
6706                         entry = NEXT_TX(entry);
6707                         txb = &tnapi->tx_buffers[entry];
6708                 }
6709         }
6710 }
6711
6712 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6713 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6714                                        struct sk_buff **pskb,
6715                                        u32 *entry, u32 *budget,
6716                                        u32 base_flags, u32 mss, u32 vlan)
6717 {
6718         struct tg3 *tp = tnapi->tp;
6719         struct sk_buff *new_skb, *skb = *pskb;
6720         dma_addr_t new_addr = 0;
6721         int ret = 0;
6722
6723         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6724                 new_skb = skb_copy(skb, GFP_ATOMIC);
6725         else {
6726                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6727
6728                 new_skb = skb_copy_expand(skb,
6729                                           skb_headroom(skb) + more_headroom,
6730                                           skb_tailroom(skb), GFP_ATOMIC);
6731         }
6732
6733         if (!new_skb) {
6734                 ret = -1;
6735         } else {
6736                 /* New SKB is guaranteed to be linear. */
6737                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6738                                           PCI_DMA_TODEVICE);
6739                 /* Make sure the mapping succeeded */
6740                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6741                         dev_kfree_skb(new_skb);
6742                         ret = -1;
6743                 } else {
6744                         u32 save_entry = *entry;
6745
6746                         base_flags |= TXD_FLAG_END;
6747
6748                         tnapi->tx_buffers[*entry].skb = new_skb;
6749                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6750                                            mapping, new_addr);
6751
6752                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6753                                             new_skb->len, base_flags,
6754                                             mss, vlan)) {
6755                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6756                                 dev_kfree_skb(new_skb);
6757                                 ret = -1;
6758                         }
6759                 }
6760         }
6761
6762         dev_kfree_skb(skb);
6763         *pskb = new_skb;
6764         return ret;
6765 }
6766
6767 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6768
6769 /* Use GSO to work around a rare TSO bug that may be triggered when the
6770  * TSO header is greater than 80 bytes.
6771  */
6772 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6773 {
6774         struct sk_buff *segs, *nskb;
6775         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6776
6777         /* Estimate the number of fragments in the worst case */
6778         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6779                 netif_stop_queue(tp->dev);
6780
6781                 /* netif_tx_stop_queue() must be done before checking
6782                  * the tx index in tg3_tx_avail() below, because in
6783                  * tg3_tx(), we update tx index before checking for
6784                  * netif_tx_queue_stopped().
6785                  */
6786                 smp_mb();
6787                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6788                         return NETDEV_TX_BUSY;
6789
6790                 netif_wake_queue(tp->dev);
6791         }
6792
6793         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6794         if (IS_ERR(segs))
6795                 goto tg3_tso_bug_end;
6796
6797         do {
6798                 nskb = segs;
6799                 segs = segs->next;
6800                 nskb->next = NULL;
6801                 tg3_start_xmit(nskb, tp->dev);
6802         } while (segs);
6803
6804 tg3_tso_bug_end:
6805         dev_kfree_skb(skb);
6806
6807         return NETDEV_TX_OK;
6808 }
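
/* Editor's note: tg3_tso_bug() above falls back on the stack's software
 * GSO path.  skb_gso_segment(), called with NETIF_F_TSO masked out of
 * the feature set, returns a singly linked list of already-segmented
 * MTU-sized skbs, each of which is re-submitted through
 * tg3_start_xmit() as an ordinary (non-TSO) packet.  The gso_segs * 3
 * check beforehand is a rough worst-case reservation of descriptors per
 * segment so the re-submission cannot run the ring dry halfway through.
 */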
6809
6810 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6811  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6812  */
6813 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6814 {
6815         struct tg3 *tp = netdev_priv(dev);
6816         u32 len, entry, base_flags, mss, vlan = 0;
6817         u32 budget;
6818         int i = -1, would_hit_hwbug;
6819         dma_addr_t mapping;
6820         struct tg3_napi *tnapi;
6821         struct netdev_queue *txq;
6822         unsigned int last;
6823
6824         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6825         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6826         if (tg3_flag(tp, ENABLE_TSS))
6827                 tnapi++;
6828
6829         budget = tg3_tx_avail(tnapi);
6830
6831         /* We are running in BH disabled context with netif_tx_lock
6832          * and TX reclaim runs via tp->napi.poll inside of a software
6833          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6834          * no IRQ context deadlocks to worry about either.  Rejoice!
6835          */
6836         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6837                 if (!netif_tx_queue_stopped(txq)) {
6838                         netif_tx_stop_queue(txq);
6839
6840                         /* This is a hard error, log it. */
6841                         netdev_err(dev,
6842                                    "BUG! Tx Ring full when queue awake!\n");
6843                 }
6844                 return NETDEV_TX_BUSY;
6845         }
6846
6847         entry = tnapi->tx_prod;
6848         base_flags = 0;
6849         if (skb->ip_summed == CHECKSUM_PARTIAL)
6850                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6851
6852         mss = skb_shinfo(skb)->gso_size;
6853         if (mss) {
6854                 struct iphdr *iph;
6855                 u32 tcp_opt_len, hdr_len;
6856
6857                 if (skb_header_cloned(skb) &&
6858                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6859                         goto drop;
6860
6861                 iph = ip_hdr(skb);
6862                 tcp_opt_len = tcp_optlen(skb);
6863
6864                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6865
6866                 if (!skb_is_gso_v6(skb)) {
6867                         iph->check = 0;
6868                         iph->tot_len = htons(mss + hdr_len);
6869                 }
6870
6871                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6872                     tg3_flag(tp, TSO_BUG))
6873                         return tg3_tso_bug(tp, skb);
6874
6875                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6876                                TXD_FLAG_CPU_POST_DMA);
6877
6878                 if (tg3_flag(tp, HW_TSO_1) ||
6879                     tg3_flag(tp, HW_TSO_2) ||
6880                     tg3_flag(tp, HW_TSO_3)) {
6881                         tcp_hdr(skb)->check = 0;
6882                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6883                 } else
6884                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6885                                                                  iph->daddr, 0,
6886                                                                  IPPROTO_TCP,
6887                                                                  0);
6888
6889                 if (tg3_flag(tp, HW_TSO_3)) {
6890                         mss |= (hdr_len & 0xc) << 12;
6891                         if (hdr_len & 0x10)
6892                                 base_flags |= 0x00000010;
6893                         base_flags |= (hdr_len & 0x3e0) << 5;
6894                 } else if (tg3_flag(tp, HW_TSO_2))
6895                         mss |= hdr_len << 9;
6896                 else if (tg3_flag(tp, HW_TSO_1) ||
6897                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6898                         if (tcp_opt_len || iph->ihl > 5) {
6899                                 int tsflags;
6900
6901                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6902                                 mss |= (tsflags << 11);
6903                         }
6904                 } else {
6905                         if (tcp_opt_len || iph->ihl > 5) {
6906                                 int tsflags;
6907
6908                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6909                                 base_flags |= tsflags << 12;
6910                         }
6911                 }
6912         }
6913
6914         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6915             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6916                 base_flags |= TXD_FLAG_JMB_PKT;
6917
6918         if (vlan_tx_tag_present(skb)) {
6919                 base_flags |= TXD_FLAG_VLAN;
6920                 vlan = vlan_tx_tag_get(skb);
6921         }
6922
6923         len = skb_headlen(skb);
6924
6925         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6926         if (pci_dma_mapping_error(tp->pdev, mapping))
6927                 goto drop;
6928
6929
6930         tnapi->tx_buffers[entry].skb = skb;
6931         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6932
6933         would_hit_hwbug = 0;
6934
6935         if (tg3_flag(tp, 5701_DMA_BUG))
6936                 would_hit_hwbug = 1;
6937
6938         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6939                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6940                             mss, vlan)) {
6941                 would_hit_hwbug = 1;
6942         } else if (skb_shinfo(skb)->nr_frags > 0) {
6943                 u32 tmp_mss = mss;
6944
6945                 if (!tg3_flag(tp, HW_TSO_1) &&
6946                     !tg3_flag(tp, HW_TSO_2) &&
6947                     !tg3_flag(tp, HW_TSO_3))
6948                         tmp_mss = 0;
6949
6950                 /* Now loop through additional data
6951                  * fragments, and queue them.
6952                  */
6953                 last = skb_shinfo(skb)->nr_frags - 1;
6954                 for (i = 0; i <= last; i++) {
6955                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6956
6957                         len = skb_frag_size(frag);
6958                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6959                                                    len, DMA_TO_DEVICE);
6960
6961                         tnapi->tx_buffers[entry].skb = NULL;
6962                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6963                                            mapping);
6964                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6965                                 goto dma_error;
6966
6967                         if (!budget ||
6968                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6969                                             len, base_flags |
6970                                             ((i == last) ? TXD_FLAG_END : 0),
6971                                             tmp_mss, vlan)) {
6972                                 would_hit_hwbug = 1;
6973                                 break;
6974                         }
6975                 }
6976         }
6977
6978         if (would_hit_hwbug) {
6979                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6980
6981                 /* If the workaround fails due to memory/mapping
6982                  * failure, silently drop this packet.
6983                  */
6984                 entry = tnapi->tx_prod;
6985                 budget = tg3_tx_avail(tnapi);
6986                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6987                                                 base_flags, mss, vlan))
6988                         goto drop_nofree;
6989         }
6990
6991         skb_tx_timestamp(skb);
6992         netdev_tx_sent_queue(txq, skb->len);
6993
6994         /* Sync BD data before updating mailbox */
6995         wmb();
6996
6997         /* Packets are ready, update Tx producer idx local and on card. */
6998         tw32_tx_mbox(tnapi->prodmbox, entry);
6999
7000         tnapi->tx_prod = entry;
7001         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7002                 netif_tx_stop_queue(txq);
7003
7004                 /* netif_tx_stop_queue() must be done before checking
7005                  * the tx index in tg3_tx_avail() below, because in
7006                  * tg3_tx(), we update tx index before checking for
7007                  * netif_tx_queue_stopped().
7008                  */
7009                 smp_mb();
7010                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7011                         netif_tx_wake_queue(txq);
7012         }
7013
7014         mmiowb();
7015         return NETDEV_TX_OK;
7016
7017 dma_error:
7018         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7019         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7020 drop:
7021         dev_kfree_skb(skb);
7022 drop_nofree:
7023         tp->tx_dropped++;
7024         return NETDEV_TX_OK;
7025 }
7026
7027 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7028 {
7029         if (enable) {
7030                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7031                                   MAC_MODE_PORT_MODE_MASK);
7032
7033                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7034
7035                 if (!tg3_flag(tp, 5705_PLUS))
7036                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7037
7038                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7039                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7040                 else
7041                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7042         } else {
7043                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7044
7045                 if (tg3_flag(tp, 5705_PLUS) ||
7046                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7047                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7048                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7049         }
7050
7051         tw32(MAC_MODE, tp->mac_mode);
7052         udelay(40);
7053 }
7054
7055 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7056 {
7057         u32 val, bmcr, mac_mode, ptest = 0;
7058
7059         tg3_phy_toggle_apd(tp, false);
7060         tg3_phy_toggle_automdix(tp, 0);
7061
7062         if (extlpbk && tg3_phy_set_extloopbk(tp))
7063                 return -EIO;
7064
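        /* Build a BMCR value that forces full duplex at the requested speed. */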
7065         bmcr = BMCR_FULLDPLX;
7066         switch (speed) {
7067         case SPEED_10:
7068                 break;
7069         case SPEED_100:
7070                 bmcr |= BMCR_SPEED100;
7071                 break;
7072         case SPEED_1000:
7073         default:
7074                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7075                         speed = SPEED_100;
7076                         bmcr |= BMCR_SPEED100;
7077                 } else {
7078                         speed = SPEED_1000;
7079                         bmcr |= BMCR_SPEED1000;
7080                 }
7081         }
7082
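        /* For external loopback, force 1000BASE-T master mode on regular
         * PHYs; FET PHYs are driven through the PTEST trim bits instead.
         */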
7083         if (extlpbk) {
7084                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7085                         tg3_readphy(tp, MII_CTRL1000, &val);
7086                         val |= CTL1000_AS_MASTER |
7087                                CTL1000_ENABLE_MASTER;
7088                         tg3_writephy(tp, MII_CTRL1000, val);
7089                 } else {
7090                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7091                                 MII_TG3_FET_PTEST_TRIM_2;
7092                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7093                 }
7094         } else
7095                 bmcr |= BMCR_LOOPBACK;
7096
7097         tg3_writephy(tp, MII_BMCR, bmcr);
7098
7099         /* The write needs to be flushed for the FETs */
7100         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7101                 tg3_readphy(tp, MII_BMCR, &bmcr);
7102
7103         udelay(40);
7104
7105         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7106             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7107                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7108                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7109                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7110
7111                 /* The write needs to be flushed for the AC131 */
7112                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7113         }
7114
7115         /* Reset to prevent intermittently losing the first rx packet */
7116         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7117             tg3_flag(tp, 5780_CLASS)) {
7118                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7119                 udelay(10);
7120                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7121         }
7122
7123         mac_mode = tp->mac_mode &
7124                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7125         if (speed == SPEED_1000)
7126                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7127         else
7128                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7129
7130         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7131                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7132
7133                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7134                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7135                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7136                         mac_mode |= MAC_MODE_LINK_POLARITY;
7137
7138                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7139                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7140         }
7141
7142         tw32(MAC_MODE, mac_mode);
7143         udelay(40);
7144
7145         return 0;
7146 }
7147
7148 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7149 {
7150         struct tg3 *tp = netdev_priv(dev);
7151
7152         if (features & NETIF_F_LOOPBACK) {
7153                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7154                         return;
7155
7156                 spin_lock_bh(&tp->lock);
7157                 tg3_mac_loopback(tp, true);
7158                 netif_carrier_on(tp->dev);
7159                 spin_unlock_bh(&tp->lock);
7160                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7161         } else {
7162                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7163                         return;
7164
7165                 spin_lock_bh(&tp->lock);
7166                 tg3_mac_loopback(tp, false);
7167                 /* Force link status check */
7168                 tg3_setup_phy(tp, 1);
7169                 spin_unlock_bh(&tp->lock);
7170                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7171         }
7172 }
7173
7174 static netdev_features_t tg3_fix_features(struct net_device *dev,
7175         netdev_features_t features)
7176 {
7177         struct tg3 *tp = netdev_priv(dev);
7178
7179         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7180                 features &= ~NETIF_F_ALL_TSO;
7181
7182         return features;
7183 }
7184
7185 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7186 {
7187         netdev_features_t changed = dev->features ^ features;
7188
7189         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7190                 tg3_set_loopback(dev, features);
7191
7192         return 0;
7193 }
7194
7195 static void tg3_rx_prodring_free(struct tg3 *tp,
7196                                  struct tg3_rx_prodring_set *tpr)
7197 {
7198         int i;
7199
7200         if (tpr != &tp->napi[0].prodring) {
7201                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7202                      i = (i + 1) & tp->rx_std_ring_mask)
7203                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7204                                         tp->rx_pkt_map_sz);
7205
7206                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7207                         for (i = tpr->rx_jmb_cons_idx;
7208                              i != tpr->rx_jmb_prod_idx;
7209                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7210                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7211                                                 TG3_RX_JMB_MAP_SZ);
7212                         }
7213                 }
7214
7215                 return;
7216         }
7217
7218         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7219                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7220                                 tp->rx_pkt_map_sz);
7221
7222         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7223                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7224                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7225                                         TG3_RX_JMB_MAP_SZ);
7226         }
7227 }
7228
7229 /* Initialize rx rings for packet processing.
7230  *
7231  * The chip has been shut down and the driver detached from
7232  * the networking stack, so no interrupts or new tx packets will
7233  * end up in the driver.  tp->{tx,}lock are held and thus
7234  * we may not sleep.
7235  */
7236 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7237                                  struct tg3_rx_prodring_set *tpr)
7238 {
7239         u32 i, rx_pkt_dma_sz;
7240
7241         tpr->rx_std_cons_idx = 0;
7242         tpr->rx_std_prod_idx = 0;
7243         tpr->rx_jmb_cons_idx = 0;
7244         tpr->rx_jmb_prod_idx = 0;
7245
7246         if (tpr != &tp->napi[0].prodring) {
7247                 memset(&tpr->rx_std_buffers[0], 0,
7248                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7249                 if (tpr->rx_jmb_buffers)
7250                         memset(&tpr->rx_jmb_buffers[0], 0,
7251                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7252                 goto done;
7253         }
7254
7255         /* Zero out all descriptors. */
7256         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7257
7258         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7259         if (tg3_flag(tp, 5780_CLASS) &&
7260             tp->dev->mtu > ETH_DATA_LEN)
7261                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7262         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7263
7264         /* Initialize invariants of the rings; we only set this
7265          * stuff once.  This works because the card does not
7266          * write into the rx buffer posting rings.
7267          */
7268         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7269                 struct tg3_rx_buffer_desc *rxd;
7270
7271                 rxd = &tpr->rx_std[i];
7272                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7273                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7274                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7275                                (i << RXD_OPAQUE_INDEX_SHIFT));
7276         }
7277
7278         /* Now allocate fresh SKBs for each rx ring. */
7279         for (i = 0; i < tp->rx_pending; i++) {
7280                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7281                         netdev_warn(tp->dev,
7282                                     "Using a smaller RX standard ring. Only "
7283                                     "%d out of %d buffers were allocated "
7284                                     "successfully\n", i, tp->rx_pending);
7285                         if (i == 0)
7286                                 goto initfail;
7287                         tp->rx_pending = i;
7288                         break;
7289                 }
7290         }
7291
7292         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7293                 goto done;
7294
7295         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7296
7297         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7298                 goto done;
7299
7300         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7301                 struct tg3_rx_buffer_desc *rxd;
7302
7303                 rxd = &tpr->rx_jmb[i].std;
7304                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7305                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7306                                   RXD_FLAG_JUMBO;
7307                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7308                        (i << RXD_OPAQUE_INDEX_SHIFT));
7309         }
7310
7311         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7312                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7313                         netdev_warn(tp->dev,
7314                                     "Using a smaller RX jumbo ring. Only %d "
7315                                     "out of %d buffers were allocated "
7316                                     "successfully\n", i, tp->rx_jumbo_pending);
7317                         if (i == 0)
7318                                 goto initfail;
7319                         tp->rx_jumbo_pending = i;
7320                         break;
7321                 }
7322         }
7323
7324 done:
7325         return 0;
7326
7327 initfail:
7328         tg3_rx_prodring_free(tp, tpr);
7329         return -ENOMEM;
7330 }
7331
7332 static void tg3_rx_prodring_fini(struct tg3 *tp,
7333                                  struct tg3_rx_prodring_set *tpr)
7334 {
7335         kfree(tpr->rx_std_buffers);
7336         tpr->rx_std_buffers = NULL;
7337         kfree(tpr->rx_jmb_buffers);
7338         tpr->rx_jmb_buffers = NULL;
7339         if (tpr->rx_std) {
7340                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7341                                   tpr->rx_std, tpr->rx_std_mapping);
7342                 tpr->rx_std = NULL;
7343         }
7344         if (tpr->rx_jmb) {
7345                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7346                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7347                 tpr->rx_jmb = NULL;
7348         }
7349 }
7350
7351 static int tg3_rx_prodring_init(struct tg3 *tp,
7352                                 struct tg3_rx_prodring_set *tpr)
7353 {
7354         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7355                                       GFP_KERNEL);
7356         if (!tpr->rx_std_buffers)
7357                 return -ENOMEM;
7358
7359         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7360                                          TG3_RX_STD_RING_BYTES(tp),
7361                                          &tpr->rx_std_mapping,
7362                                          GFP_KERNEL);
7363         if (!tpr->rx_std)
7364                 goto err_out;
7365
7366         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7367                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7368                                               GFP_KERNEL);
7369                 if (!tpr->rx_jmb_buffers)
7370                         goto err_out;
7371
7372                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7373                                                  TG3_RX_JMB_RING_BYTES(tp),
7374                                                  &tpr->rx_jmb_mapping,
7375                                                  GFP_KERNEL);
7376                 if (!tpr->rx_jmb)
7377                         goto err_out;
7378         }
7379
7380         return 0;
7381
7382 err_out:
7383         tg3_rx_prodring_fini(tp, tpr);
7384         return -ENOMEM;
7385 }
7386
7387 /* Free up pending packets in all rx/tx rings.
7388  *
7389  * The chip has been shut down and the driver detached from
7390  * the networking stack, so no interrupts or new tx packets will
7391  * end up in the driver.  tp->{tx,}lock is not held and we are not
7392  * in an interrupt context and thus may sleep.
7393  */
7394 static void tg3_free_rings(struct tg3 *tp)
7395 {
7396         int i, j;
7397
7398         for (j = 0; j < tp->irq_cnt; j++) {
7399                 struct tg3_napi *tnapi = &tp->napi[j];
7400
7401                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7402
7403                 if (!tnapi->tx_buffers)
7404                         continue;
7405
7406                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7407                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7408
7409                         if (!skb)
7410                                 continue;
7411
7412                         tg3_tx_skb_unmap(tnapi, i,
7413                                          skb_shinfo(skb)->nr_frags - 1);
7414
7415                         dev_kfree_skb_any(skb);
7416                 }
7417                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7418         }
7419 }
7420
7421 /* Initialize tx/rx rings for packet processing.
7422  *
7423  * The chip has been shut down and the driver detached from
7424  * the networking stack, so no interrupts or new tx packets will
7425  * end up in the driver.  tp->{tx,}lock are held and thus
7426  * we may not sleep.
7427  */
7428 static int tg3_init_rings(struct tg3 *tp)
7429 {
7430         int i;
7431
7432         /* Free up all the SKBs. */
7433         tg3_free_rings(tp);
7434
7435         for (i = 0; i < tp->irq_cnt; i++) {
7436                 struct tg3_napi *tnapi = &tp->napi[i];
7437
7438                 tnapi->last_tag = 0;
7439                 tnapi->last_irq_tag = 0;
7440                 tnapi->hw_status->status = 0;
7441                 tnapi->hw_status->status_tag = 0;
7442                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7443
7444                 tnapi->tx_prod = 0;
7445                 tnapi->tx_cons = 0;
7446                 if (tnapi->tx_ring)
7447                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7448
7449                 tnapi->rx_rcb_ptr = 0;
7450                 if (tnapi->rx_rcb)
7451                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7452
7453                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7454                         tg3_free_rings(tp);
7455                         return -ENOMEM;
7456                 }
7457         }
7458
7459         return 0;
7460 }
7461
7462 /*
7463  * Must only be invoked with interrupt sources disabled and
7464  * the hardware shut down.
7465  */
7466 static void tg3_free_consistent(struct tg3 *tp)
7467 {
7468         int i;
7469
7470         for (i = 0; i < tp->irq_cnt; i++) {
7471                 struct tg3_napi *tnapi = &tp->napi[i];
7472
7473                 if (tnapi->tx_ring) {
7474                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7475                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7476                         tnapi->tx_ring = NULL;
7477                 }
7478
7479                 kfree(tnapi->tx_buffers);
7480                 tnapi->tx_buffers = NULL;
7481
7482                 if (tnapi->rx_rcb) {
7483                         dma_free_coherent(&tp->pdev->dev,
7484                                           TG3_RX_RCB_RING_BYTES(tp),
7485                                           tnapi->rx_rcb,
7486                                           tnapi->rx_rcb_mapping);
7487                         tnapi->rx_rcb = NULL;
7488                 }
7489
7490                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7491
7492                 if (tnapi->hw_status) {
7493                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7494                                           tnapi->hw_status,
7495                                           tnapi->status_mapping);
7496                         tnapi->hw_status = NULL;
7497                 }
7498         }
7499
7500         if (tp->hw_stats) {
7501                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7502                                   tp->hw_stats, tp->stats_mapping);
7503                 tp->hw_stats = NULL;
7504         }
7505 }
7506
7507 /*
7508  * Must only be invoked with interrupt sources disabled and
7509  * the hardware shut down.  Can sleep.
7510  */
7511 static int tg3_alloc_consistent(struct tg3 *tp)
7512 {
7513         int i;
7514
7515         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7516                                           sizeof(struct tg3_hw_stats),
7517                                           &tp->stats_mapping,
7518                                           GFP_KERNEL);
7519         if (!tp->hw_stats)
7520                 goto err_out;
7521
7522         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7523
7524         for (i = 0; i < tp->irq_cnt; i++) {
7525                 struct tg3_napi *tnapi = &tp->napi[i];
7526                 struct tg3_hw_status *sblk;
7527
7528                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7529                                                       TG3_HW_STATUS_SIZE,
7530                                                       &tnapi->status_mapping,
7531                                                       GFP_KERNEL);
7532                 if (!tnapi->hw_status)
7533                         goto err_out;
7534
7535                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7536                 sblk = tnapi->hw_status;
7537
7538                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7539                         goto err_out;
7540
7541                 /* If multivector TSS is enabled, vector 0 does not handle
7542                  * tx interrupts.  Don't allocate any resources for it.
7543                  */
7544                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7545                     (i && tg3_flag(tp, ENABLE_TSS))) {
7546                         tnapi->tx_buffers = kzalloc(
7547                                                sizeof(struct tg3_tx_ring_info) *
7548                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7549                         if (!tnapi->tx_buffers)
7550                                 goto err_out;
7551
7552                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7553                                                             TG3_TX_RING_BYTES,
7554                                                         &tnapi->tx_desc_mapping,
7555                                                             GFP_KERNEL);
7556                         if (!tnapi->tx_ring)
7557                                 goto err_out;
7558                 }
7559
7560                 /*
7561                  * When RSS is enabled, the status block format changes
7562                  * slightly.  The "rx_jumbo_consumer", "reserved",
7563                  * and "rx_mini_consumer" members get mapped to the
7564                  * other three rx return ring producer indexes.
7565                  */
7566                 switch (i) {
7567                 default:
7568                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7569                         break;
7570                 case 2:
7571                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7572                         break;
7573                 case 3:
7574                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7575                         break;
7576                 case 4:
7577                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7578                         break;
7579                 }
7580
7581                 /*
7582                  * If multivector RSS is enabled, vector 0 does not handle
7583                  * rx or tx interrupts.  Don't allocate any resources for it.
7584                  */
7585                 if (!i && tg3_flag(tp, ENABLE_RSS))
7586                         continue;
7587
7588                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7589                                                    TG3_RX_RCB_RING_BYTES(tp),
7590                                                    &tnapi->rx_rcb_mapping,
7591                                                    GFP_KERNEL);
7592                 if (!tnapi->rx_rcb)
7593                         goto err_out;
7594
7595                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7596         }
7597
7598         return 0;
7599
7600 err_out:
7601         tg3_free_consistent(tp);
7602         return -ENOMEM;
7603 }
7604
7605 #define MAX_WAIT_CNT 1000
7606
7607 /* To stop a block, clear the enable bit and poll until it
7608  * clears.  tp->lock is held.
7609  */
7610 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7611 {
7612         unsigned int i;
7613         u32 val;
7614
7615         if (tg3_flag(tp, 5705_PLUS)) {
7616                 switch (ofs) {
7617                 case RCVLSC_MODE:
7618                 case DMAC_MODE:
7619                 case MBFREE_MODE:
7620                 case BUFMGR_MODE:
7621                 case MEMARB_MODE:
7622                         /* We can't enable/disable these bits on the
7623                          * 5705/5750, so just report success.
7624                          */
7625                         return 0;
7626
7627                 default:
7628                         break;
7629                 }
7630         }
7631
7632         val = tr32(ofs);
7633         val &= ~enable_bit;
7634         tw32_f(ofs, val);
7635
7636         for (i = 0; i < MAX_WAIT_CNT; i++) {
7637                 udelay(100);
7638                 val = tr32(ofs);
7639                 if ((val & enable_bit) == 0)
7640                         break;
7641         }
7642
7643         if (i == MAX_WAIT_CNT && !silent) {
7644                 dev_err(&tp->pdev->dev,
7645                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7646                         ofs, enable_bit);
7647                 return -ENODEV;
7648         }
7649
7650         return 0;
7651 }
7652
7653 /* tp->lock is held. */
7654 static int tg3_abort_hw(struct tg3 *tp, int silent)
7655 {
7656         int i, err;
7657
7658         tg3_disable_ints(tp);
7659
7660         tp->rx_mode &= ~RX_MODE_ENABLE;
7661         tw32_f(MAC_RX_MODE, tp->rx_mode);
7662         udelay(10);
7663
7664         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7665         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7666         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7667         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7668         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7669         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7670
7671         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7672         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7673         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7674         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7675         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7676         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7677         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7678
7679         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7680         tw32_f(MAC_MODE, tp->mac_mode);
7681         udelay(40);
7682
7683         tp->tx_mode &= ~TX_MODE_ENABLE;
7684         tw32_f(MAC_TX_MODE, tp->tx_mode);
7685
7686         for (i = 0; i < MAX_WAIT_CNT; i++) {
7687                 udelay(100);
7688                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7689                         break;
7690         }
7691         if (i >= MAX_WAIT_CNT) {
7692                 dev_err(&tp->pdev->dev,
7693                         "%s timed out, TX_MODE_ENABLE will not clear "
7694                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7695                 err |= -ENODEV;
7696         }
7697
7698         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7699         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7700         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7701
7702         tw32(FTQ_RESET, 0xffffffff);
7703         tw32(FTQ_RESET, 0x00000000);
7704
7705         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7706         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7707
7708         for (i = 0; i < tp->irq_cnt; i++) {
7709                 struct tg3_napi *tnapi = &tp->napi[i];
7710                 if (tnapi->hw_status)
7711                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7712         }
7713
7714         return err;
7715 }
7716
7717 /* Save PCI command register before chip reset */
7718 static void tg3_save_pci_state(struct tg3 *tp)
7719 {
7720         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7721 }
7722
7723 /* Restore PCI state after chip reset */
7724 static void tg3_restore_pci_state(struct tg3 *tp)
7725 {
7726         u32 val;
7727
7728         /* Re-enable indirect register accesses. */
7729         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7730                                tp->misc_host_ctrl);
7731
7732         /* Set MAX PCI retry to zero. */
7733         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7734         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7735             tg3_flag(tp, PCIX_MODE))
7736                 val |= PCISTATE_RETRY_SAME_DMA;
7737         /* Allow reads and writes to the APE register and memory space. */
7738         if (tg3_flag(tp, ENABLE_APE))
7739                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7740                        PCISTATE_ALLOW_APE_SHMEM_WR |
7741                        PCISTATE_ALLOW_APE_PSPACE_WR;
7742         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7743
7744         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7745
7746         if (!tg3_flag(tp, PCI_EXPRESS)) {
7747                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7748                                       tp->pci_cacheline_sz);
7749                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7750                                       tp->pci_lat_timer);
7751         }
7752
7753         /* Make sure PCI-X relaxed ordering bit is clear. */
7754         if (tg3_flag(tp, PCIX_MODE)) {
7755                 u16 pcix_cmd;
7756
7757                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7758                                      &pcix_cmd);
7759                 pcix_cmd &= ~PCI_X_CMD_ERO;
7760                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7761                                       pcix_cmd);
7762         }
7763
7764         if (tg3_flag(tp, 5780_CLASS)) {
7765
7766                 /* Chip reset on the 5780 will reset the MSI enable bit,
7767                  * so we need to restore it.
7768                  */
7769                 if (tg3_flag(tp, USING_MSI)) {
7770                         u16 ctrl;
7771
7772                         pci_read_config_word(tp->pdev,
7773                                              tp->msi_cap + PCI_MSI_FLAGS,
7774                                              &ctrl);
7775                         pci_write_config_word(tp->pdev,
7776                                               tp->msi_cap + PCI_MSI_FLAGS,
7777                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7778                         val = tr32(MSGINT_MODE);
7779                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7780                 }
7781         }
7782 }
7783
7784 /* tp->lock is held. */
7785 static int tg3_chip_reset(struct tg3 *tp)
7786 {
7787         u32 val;
7788         void (*write_op)(struct tg3 *, u32, u32);
7789         int i, err;
7790
7791         tg3_nvram_lock(tp);
7792
7793         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7794
7795         /* No matching tg3_nvram_unlock() after this because
7796          * chip reset below will undo the nvram lock.
7797          */
7798         tp->nvram_lock_cnt = 0;
7799
7800         /* GRC_MISC_CFG core clock reset will clear the memory
7801          * enable bit in PCI register 4 and the MSI enable bit
7802          * on some chips, so we save relevant registers here.
7803          */
7804         tg3_save_pci_state(tp);
7805
7806         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7807             tg3_flag(tp, 5755_PLUS))
7808                 tw32(GRC_FASTBOOT_PC, 0);
7809
7810         /*
7811          * We must avoid the readl() that normally takes place.
7812          * It locks up machines, causes machine checks, and does
7813          * other fun things.  So, temporarily disable the 5701
7814          * hardware workaround while we do the reset.
7815          */
7816         write_op = tp->write32;
7817         if (write_op == tg3_write_flush_reg32)
7818                 tp->write32 = tg3_write32;
7819
7820         /* Prevent the irq handler from reading or writing PCI registers
7821          * during chip reset when the memory enable bit in the PCI command
7822          * register may be cleared.  The chip does not generate interrupt
7823          * at this time, but the irq handler may still be called due to irq
7824          * sharing or irqpoll.
7825          */
7826         tg3_flag_set(tp, CHIP_RESETTING);
7827         for (i = 0; i < tp->irq_cnt; i++) {
7828                 struct tg3_napi *tnapi = &tp->napi[i];
7829                 if (tnapi->hw_status) {
7830                         tnapi->hw_status->status = 0;
7831                         tnapi->hw_status->status_tag = 0;
7832                 }
7833                 tnapi->last_tag = 0;
7834                 tnapi->last_irq_tag = 0;
7835         }
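        /* Make the cleared tags and status blocks visible before waiting
         * out any in-flight interrupt handlers below.
         */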
7836         smp_mb();
7837
7838         for (i = 0; i < tp->irq_cnt; i++)
7839                 synchronize_irq(tp->napi[i].irq_vec);
7840
7841         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7842                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7843                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7844         }
7845
7846         /* do the reset */
7847         val = GRC_MISC_CFG_CORECLK_RESET;
7848
7849         if (tg3_flag(tp, PCI_EXPRESS)) {
7850                 /* Force PCIe 1.0a mode */
7851                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7852                     !tg3_flag(tp, 57765_PLUS) &&
7853                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7854                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7855                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7856
7857                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7858                         tw32(GRC_MISC_CFG, (1 << 29));
7859                         val |= (1 << 29);
7860                 }
7861         }
7862
7863         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7864                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7865                 tw32(GRC_VCPU_EXT_CTRL,
7866                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7867         }
7868
7869         /* Manage gphy power for all CPMU-absent PCIe devices. */
7870         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7871                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7872
7873         tw32(GRC_MISC_CFG, val);
7874
7875         /* restore 5701 hardware bug workaround write method */
7876         tp->write32 = write_op;
7877
7878         /* Unfortunately, we have to delay before the PCI read back.
7879          * Some 575X chips will not even respond to a PCI cfg access
7880          * when the reset command is given to the chip.
7881          *
7882          * How do these hardware designers expect things to work
7883          * properly if the PCI write is posted for a long period
7884          * of time?  Some method is always needed by which a
7885          * register read back can occur to push out the posted
7886          * write that performs the reset.
7887          *
7888          * For most tg3 variants the trick below has worked.
7889          * Ho hum...
7890          */
7891         udelay(120);
7892
7893         /* Flush PCI posted writes.  The normal MMIO registers
7894          * are inaccessible at this time, so this is the only
7895          * way to do this reliably (actually, this is no longer
7896          * the case, see above).  I tried to use indirect
7897          * register read/write but this upset some 5701 variants.
7898          */
7899         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7900
7901         udelay(120);
7902
7903         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7904                 u16 val16;
7905
7906                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7907                         int i;
7908                         u32 cfg_val;
7909
7910                         /* Wait for link training to complete.  */
7911                         for (i = 0; i < 5000; i++)
7912                                 udelay(100);
7913
7914                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7915                         pci_write_config_dword(tp->pdev, 0xc4,
7916                                                cfg_val | (1 << 15));
7917                 }
7918
7919                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7920                 pci_read_config_word(tp->pdev,
7921                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7922                                      &val16);
7923                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7924                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7925                 /*
7926                  * Older PCIe devices only support the 128-byte
7927                  * MPS setting.  Enforce the restriction.
7928                  */
7929                 if (!tg3_flag(tp, CPMU_PRESENT))
7930                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7931                 pci_write_config_word(tp->pdev,
7932                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7933                                       val16);
7934
7935                 /* Clear error status */
7936                 pci_write_config_word(tp->pdev,
7937                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7938                                       PCI_EXP_DEVSTA_CED |
7939                                       PCI_EXP_DEVSTA_NFED |
7940                                       PCI_EXP_DEVSTA_FED |
7941                                       PCI_EXP_DEVSTA_URD);
7942         }
7943
7944         tg3_restore_pci_state(tp);
7945
7946         tg3_flag_clear(tp, CHIP_RESETTING);
7947         tg3_flag_clear(tp, ERROR_PROCESSED);
7948
7949         val = 0;
7950         if (tg3_flag(tp, 5780_CLASS))
7951                 val = tr32(MEMARB_MODE);
7952         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7953
7954         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7955                 tg3_stop_fw(tp);
7956                 tw32(0x5000, 0x400);
7957         }
7958
7959         tw32(GRC_MODE, tp->grc_mode);
7960
7961         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7962                 val = tr32(0xc4);
7963
7964                 tw32(0xc4, val | (1 << 15));
7965         }
7966
7967         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7968             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7969                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7970                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7971                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7972                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7973         }
7974
7975         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7976                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7977                 val = tp->mac_mode;
7978         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7979                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7980                 val = tp->mac_mode;
7981         } else
7982                 val = 0;
7983
7984         tw32_f(MAC_MODE, val);
7985         udelay(40);
7986
7987         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7988
7989         err = tg3_poll_fw(tp);
7990         if (err)
7991                 return err;
7992
7993         tg3_mdio_start(tp);
7994
7995         if (tg3_flag(tp, PCI_EXPRESS) &&
7996             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7997             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7998             !tg3_flag(tp, 57765_PLUS)) {
7999                 val = tr32(0x7c00);
8000
8001                 tw32(0x7c00, val | (1 << 25));
8002         }
8003
8004         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8005                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8006                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8007         }
8008
8009         /* Reprobe ASF enable state.  */
8010         tg3_flag_clear(tp, ENABLE_ASF);
8011         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8012         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8013         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8014                 u32 nic_cfg;
8015
8016                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8017                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8018                         tg3_flag_set(tp, ENABLE_ASF);
8019                         tp->last_event_jiffies = jiffies;
8020                         if (tg3_flag(tp, 5750_PLUS))
8021                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8022                 }
8023         }
8024
8025         return 0;
8026 }
8027
8028 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8029 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8030
8031 /* tp->lock is held. */
8032 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8033 {
8034         int err;
8035
8036         tg3_stop_fw(tp);
8037
8038         tg3_write_sig_pre_reset(tp, kind);
8039
8040         tg3_abort_hw(tp, silent);
8041         err = tg3_chip_reset(tp);
8042
8043         __tg3_set_mac_addr(tp, 0);
8044
8045         tg3_write_sig_legacy(tp, kind);
8046         tg3_write_sig_post_reset(tp, kind);
8047
8048         if (tp->hw_stats) {
8049                 /* Save the stats across chip resets... */
8050                 tg3_get_nstats(tp, &tp->net_stats_prev);
8051                 tg3_get_estats(tp, &tp->estats_prev);
8052
8053                 /* And make sure the next sample is new data */
8054                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8055         }
8056
8057         if (err)
8058                 return err;
8059
8060         return 0;
8061 }
8062
8063 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8064 {
8065         struct tg3 *tp = netdev_priv(dev);
8066         struct sockaddr *addr = p;
8067         int err = 0, skip_mac_1 = 0;
8068
8069         if (!is_valid_ether_addr(addr->sa_data))
8070                 return -EADDRNOTAVAIL;
8071
8072         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8073
8074         if (!netif_running(dev))
8075                 return 0;
8076
8077         if (tg3_flag(tp, ENABLE_ASF)) {
8078                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8079
8080                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8081                 addr0_low = tr32(MAC_ADDR_0_LOW);
8082                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8083                 addr1_low = tr32(MAC_ADDR_1_LOW);
8084
8085                 /* Skip MAC addr 1 if ASF is using it. */
8086                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8087                     !(addr1_high == 0 && addr1_low == 0))
8088                         skip_mac_1 = 1;
8089         }
8090         spin_lock_bh(&tp->lock);
8091         __tg3_set_mac_addr(tp, skip_mac_1);
8092         spin_unlock_bh(&tp->lock);
8093
8094         return err;
8095 }
8096
8097 /* tp->lock is held. */
8098 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8099                            dma_addr_t mapping, u32 maxlen_flags,
8100                            u32 nic_addr)
8101 {
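        /* Write the ring's host DMA address (split into high/low 32-bit
         * halves), its length/flags word, and the NIC-side ring address
         * into the BDINFO block in NIC SRAM.
         */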
8102         tg3_write_mem(tp,
8103                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8104                       ((u64) mapping >> 32));
8105         tg3_write_mem(tp,
8106                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8107                       ((u64) mapping & 0xffffffff));
8108         tg3_write_mem(tp,
8109                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8110                        maxlen_flags);
8111
8112         if (!tg3_flag(tp, 5705_PLUS))
8113                 tg3_write_mem(tp,
8114                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8115                               nic_addr);
8116 }
8117
8118 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8119 {
8120         int i;
8121
8122         if (!tg3_flag(tp, ENABLE_TSS)) {
8123                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8124                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8125                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8126         } else {
8127                 tw32(HOSTCC_TXCOL_TICKS, 0);
8128                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8129                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8130         }
8131
8132         if (!tg3_flag(tp, ENABLE_RSS)) {
8133                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8134                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8135                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8136         } else {
8137                 tw32(HOSTCC_RXCOL_TICKS, 0);
8138                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8139                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8140         }
8141
8142         if (!tg3_flag(tp, 5705_PLUS)) {
8143                 u32 val = ec->stats_block_coalesce_usecs;
8144
8145                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8146                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8147
8148                 if (!netif_carrier_ok(tp->dev))
8149                         val = 0;
8150
8151                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8152         }
8153
8154         for (i = 0; i < tp->irq_cnt - 1; i++) {
8155                 u32 reg;
8156
8157                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8158                 tw32(reg, ec->rx_coalesce_usecs);
8159                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8160                 tw32(reg, ec->rx_max_coalesced_frames);
8161                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8162                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8163
8164                 if (tg3_flag(tp, ENABLE_TSS)) {
8165                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8166                         tw32(reg, ec->tx_coalesce_usecs);
8167                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8168                         tw32(reg, ec->tx_max_coalesced_frames);
8169                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8170                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8171                 }
8172         }
8173
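        /* Zero the coalescing parameters of any remaining, unused vectors. */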
8174         for (; i < tp->irq_max - 1; i++) {
8175                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8176                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8177                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8178
8179                 if (tg3_flag(tp, ENABLE_TSS)) {
8180                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8181                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8182                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8183                 }
8184         }
8185 }
8186
8187 /* tp->lock is held. */
8188 static void tg3_rings_reset(struct tg3 *tp)
8189 {
8190         int i;
8191         u32 stblk, txrcb, rxrcb, limit;
8192         struct tg3_napi *tnapi = &tp->napi[0];
8193
8194         /* Disable all transmit rings but the first. */
8195         if (!tg3_flag(tp, 5705_PLUS))
8196                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8197         else if (tg3_flag(tp, 5717_PLUS))
8198                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8199         else if (tg3_flag(tp, 57765_CLASS))
8200                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8201         else
8202                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8203
8204         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8205              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8206                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8207                               BDINFO_FLAGS_DISABLED);
8208
8209
8210         /* Disable all receive return rings but the first. */
8211         if (tg3_flag(tp, 5717_PLUS))
8212                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8213         else if (!tg3_flag(tp, 5705_PLUS))
8214                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8215         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8216                  tg3_flag(tp, 57765_CLASS))
8217                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8218         else
8219                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8220
8221         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8222              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8223                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8224                               BDINFO_FLAGS_DISABLED);
8225
8226         /* Disable interrupts */
8227         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8228         tp->napi[0].chk_msi_cnt = 0;
8229         tp->napi[0].last_rx_cons = 0;
8230         tp->napi[0].last_tx_cons = 0;
8231
8232         /* Zero mailbox registers. */
8233         if (tg3_flag(tp, SUPPORT_MSIX)) {
8234                 for (i = 1; i < tp->irq_max; i++) {
8235                         tp->napi[i].tx_prod = 0;
8236                         tp->napi[i].tx_cons = 0;
8237                         if (tg3_flag(tp, ENABLE_TSS))
8238                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8239                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8240                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8241                         tp->napi[i].chk_msi_cnt = 0;
8242                         tp->napi[i].last_rx_cons = 0;
8243                         tp->napi[i].last_tx_cons = 0;
8244                 }
8245                 if (!tg3_flag(tp, ENABLE_TSS))
8246                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8247         } else {
8248                 tp->napi[0].tx_prod = 0;
8249                 tp->napi[0].tx_cons = 0;
8250                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8251                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8252         }
8253
8254         /* Make sure the NIC-based send BD rings are disabled. */
8255         if (!tg3_flag(tp, 5705_PLUS)) {
8256                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8257                 for (i = 0; i < 16; i++)
8258                         tw32_tx_mbox(mbox + i * 8, 0);
8259         }
8260
8261         txrcb = NIC_SRAM_SEND_RCB;
8262         rxrcb = NIC_SRAM_RCV_RET_RCB;
8263
8264         /* Clear status block in ram. */
8265         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8266
8267         /* Set status block DMA address */
8268         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8269              ((u64) tnapi->status_mapping >> 32));
8270         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8271              ((u64) tnapi->status_mapping & 0xffffffff));
8272
8273         if (tnapi->tx_ring) {
8274                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8275                                (TG3_TX_RING_SIZE <<
8276                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8277                                NIC_SRAM_TX_BUFFER_DESC);
8278                 txrcb += TG3_BDINFO_SIZE;
8279         }
8280
8281         if (tnapi->rx_rcb) {
8282                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8283                                (tp->rx_ret_ring_mask + 1) <<
8284                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8285                 rxrcb += TG3_BDINFO_SIZE;
8286         }
8287
8288         stblk = HOSTCC_STATBLCK_RING1;
8289
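        /* Program the status block addresses for the additional vectors;
         * each vector's high/low address register pair sits 8 bytes after
         * the previous one.
         */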
8290         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8291                 u64 mapping = (u64)tnapi->status_mapping;
8292                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8293                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8294
8295                 /* Clear status block in ram. */
8296                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8297
8298                 if (tnapi->tx_ring) {
8299                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8300                                        (TG3_TX_RING_SIZE <<
8301                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8302                                        NIC_SRAM_TX_BUFFER_DESC);
8303                         txrcb += TG3_BDINFO_SIZE;
8304                 }
8305
8306                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8307                                ((tp->rx_ret_ring_mask + 1) <<
8308                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8309
8310                 stblk += 8;
8311                 rxrcb += TG3_BDINFO_SIZE;
8312         }
8313 }
8314
8315 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8316 {
8317         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8318
8319         if (!tg3_flag(tp, 5750_PLUS) ||
8320             tg3_flag(tp, 5780_CLASS) ||
8321             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8322             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8323             tg3_flag(tp, 57765_PLUS))
8324                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8325         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8326                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8327                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8328         else
8329                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8330
8331         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8332         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8333
8334         val = min(nic_rep_thresh, host_rep_thresh);
8335         tw32(RCVBDI_STD_THRESH, val);
8336
8337         if (tg3_flag(tp, 57765_PLUS))
8338                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8339
8340         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8341                 return;
8342
8343         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8344
8345         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8346
8347         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8348         tw32(RCVBDI_JUMBO_THRESH, val);
8349
8350         if (tg3_flag(tp, 57765_PLUS))
8351                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8352 }
8353
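/* Bit-serial CRC-32 using the reflected Ethernet polynomial 0xedb88320.
 * __tg3_set_rx_mode() below derives the multicast hash filter bits from
 * this value.
 */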
8354 static inline u32 calc_crc(unsigned char *buf, int len)
8355 {
8356         u32 reg;
8357         u32 tmp;
8358         int j, k;
8359
8360         reg = 0xffffffff;
8361
8362         for (j = 0; j < len; j++) {
8363                 reg ^= buf[j];
8364
8365                 for (k = 0; k < 8; k++) {
8366                         tmp = reg & 0x01;
8367
8368                         reg >>= 1;
8369
8370                         if (tmp)
8371                                 reg ^= 0xedb88320;
8372                 }
8373         }
8374
8375         return ~reg;
8376 }
8377
8378 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8379 {
8380         /* accept or reject all multicast frames */
8381         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8382         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8383         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8384         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8385 }
8386
8387 static void __tg3_set_rx_mode(struct net_device *dev)
8388 {
8389         struct tg3 *tp = netdev_priv(dev);
8390         u32 rx_mode;
8391
8392         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8393                                   RX_MODE_KEEP_VLAN_TAG);
8394
8395 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8396         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8397          * flag clear.
8398          */
8399         if (!tg3_flag(tp, ENABLE_ASF))
8400                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8401 #endif
8402
8403         if (dev->flags & IFF_PROMISC) {
8404                 /* Promiscuous mode. */
8405                 rx_mode |= RX_MODE_PROMISC;
8406         } else if (dev->flags & IFF_ALLMULTI) {
8407                 /* Accept all multicast. */
8408                 tg3_set_multi(tp, 1);
8409         } else if (netdev_mc_empty(dev)) {
8410                 /* Reject all multicast. */
8411                 tg3_set_multi(tp, 0);
8412         } else {
8413                 /* Accept one or more multicast(s). */
8414                 struct netdev_hw_addr *ha;
8415                 u32 mc_filter[4] = { 0, };
8416                 u32 regidx;
8417                 u32 bit;
8418                 u32 crc;
8419
8420                 netdev_for_each_mc_addr(ha, dev) {
8421                         crc = calc_crc(ha->addr, ETH_ALEN);
8422                         bit = ~crc & 0x7f;
8423                         regidx = (bit & 0x60) >> 5;
8424                         bit &= 0x1f;
8425                         mc_filter[regidx] |= (1 << bit);
8426                 }
8427
8428                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8429                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8430                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8431                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8432         }
8433
8434         if (rx_mode != tp->rx_mode) {
8435                 tp->rx_mode = rx_mode;
8436                 tw32_f(MAC_RX_MODE, rx_mode);
8437                 udelay(10);
8438         }
8439 }
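
/* Filter layout sketch: MAC_HASH_REG_0..3 form a 128-bit multicast
 * hash filter.  For each address, the inverted CRC supplies a 7-bit
 * index: bits 6:5 select one of the four 32-bit registers, bits 4:0
 * the bit within it.  On receive, the chip hashes the destination
 * address the same way and accepts the frame when the corresponding
 * filter bit is set; hash collisions mean occasional false accepts,
 * which the network stack filters out.
 */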
8440
8441 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8442 {
8443         int i;
8444
8445         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8446                 tp->rss_ind_tbl[i] =
8447                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8448 }
8449
8450 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8451 {
8452         int i;
8453
8454         if (!tg3_flag(tp, SUPPORT_MSIX))
8455                 return;
8456
8457         if (tp->irq_cnt <= 2) {
8458                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8459                 return;
8460         }
8461
8462         /* Validate table against current IRQ count */
8463         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8464                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8465                         break;
8466         }
8467
8468         if (i != TG3_RSS_INDIR_TBL_SIZE)
8469                 tg3_rss_init_dflt_indir_tbl(tp);
8470 }
8471
8472 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8473 {
8474         int i = 0;
8475         u32 reg = MAC_RSS_INDIR_TBL_0;
8476
8477         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8478                 u32 val = tp->rss_ind_tbl[i];
8479                 i++;
8480                 for (; i % 8; i++) {
8481                         val <<= 4;
8482                         val |= tp->rss_ind_tbl[i];
8483                 }
8484                 tw32(reg, val);
8485                 reg += 4;
8486         }
8487 }
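
/* Packing sketch: each rss_ind_tbl[] entry is a 4-bit RX queue number
 * and eight entries are packed per 32-bit register, first entry in the
 * most significant nibble.  E.g. with hypothetical table values
 * {1, 2, 3, 0, 1, 2, 3, 0} the loop above performs
 *
 *	tw32(MAC_RSS_INDIR_TBL_0, 0x12301230);
 *
 * so the TG3_RSS_INDIR_TBL_SIZE entries occupy
 * TG3_RSS_INDIR_TBL_SIZE / 8 consecutive registers starting at
 * MAC_RSS_INDIR_TBL_0.
 */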
8488
8489 /* tp->lock is held. */
8490 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8491 {
8492         u32 val, rdmac_mode;
8493         int i, err, limit;
8494         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8495
8496         tg3_disable_ints(tp);
8497
8498         tg3_stop_fw(tp);
8499
8500         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8501
8502         if (tg3_flag(tp, INIT_COMPLETE))
8503                 tg3_abort_hw(tp, 1);
8504
8505         /* Enable MAC control of LPI */
8506         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8507                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8508                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8509                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8510
8511                 tw32_f(TG3_CPMU_EEE_CTRL,
8512                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8513
8514                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8515                       TG3_CPMU_EEEMD_LPI_IN_TX |
8516                       TG3_CPMU_EEEMD_LPI_IN_RX |
8517                       TG3_CPMU_EEEMD_EEE_ENABLE;
8518
8519                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8520                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8521
8522                 if (tg3_flag(tp, ENABLE_APE))
8523                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8524
8525                 tw32_f(TG3_CPMU_EEE_MODE, val);
8526
8527                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8528                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8529                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8530
8531                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8532                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8533                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8534         }
8535
8536         if (reset_phy)
8537                 tg3_phy_reset(tp);
8538
8539         err = tg3_chip_reset(tp);
8540         if (err)
8541                 return err;
8542
8543         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8544
8545         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8546                 val = tr32(TG3_CPMU_CTRL);
8547                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8548                 tw32(TG3_CPMU_CTRL, val);
8549
8550                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8551                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8552                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8553                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8554
8555                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8556                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8557                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8558                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8559
8560                 val = tr32(TG3_CPMU_HST_ACC);
8561                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8562                 val |= CPMU_HST_ACC_MACCLK_6_25;
8563                 tw32(TG3_CPMU_HST_ACC, val);
8564         }
8565
8566         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8567                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8568                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8569                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8570                 tw32(PCIE_PWR_MGMT_THRESH, val);
8571
8572                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8573                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8574
8575                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8576
8577                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8578                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8579         }
8580
8581         if (tg3_flag(tp, L1PLLPD_EN)) {
8582                 u32 grc_mode = tr32(GRC_MODE);
8583
8584                 /* Access the lower 1K of PL PCIE block registers. */
8585                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8586                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8587
8588                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8589                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8590                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8591
8592                 tw32(GRC_MODE, grc_mode);
8593         }
8594
8595         if (tg3_flag(tp, 57765_CLASS)) {
8596                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8597                         u32 grc_mode = tr32(GRC_MODE);
8598
8599                         /* Access the lower 1K of PL PCIE block registers. */
8600                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8601                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8602
8603                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8604                                    TG3_PCIE_PL_LO_PHYCTL5);
8605                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8606                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8607
8608                         tw32(GRC_MODE, grc_mode);
8609                 }
8610
8611                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8612                         u32 grc_mode = tr32(GRC_MODE);
8613
8614                         /* Access the lower 1K of DL PCIE block registers. */
8615                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8616                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8617
8618                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8619                                    TG3_PCIE_DL_LO_FTSMAX);
8620                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8621                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8622                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8623
8624                         tw32(GRC_MODE, grc_mode);
8625                 }
8626
8627                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8628                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8629                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8630                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8631         }
8632
8633         /* This works around an issue with Athlon chipsets on
8634          * B3 tigon3 silicon.  This bit has no effect on any
8635          * other revision.  But do not set this on PCI Express
8636          * chips and don't even touch the clocks if the CPMU is present.
8637          */
8638         if (!tg3_flag(tp, CPMU_PRESENT)) {
8639                 if (!tg3_flag(tp, PCI_EXPRESS))
8640                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8641                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8642         }
8643
8644         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8645             tg3_flag(tp, PCIX_MODE)) {
8646                 val = tr32(TG3PCI_PCISTATE);
8647                 val |= PCISTATE_RETRY_SAME_DMA;
8648                 tw32(TG3PCI_PCISTATE, val);
8649         }
8650
8651         if (tg3_flag(tp, ENABLE_APE)) {
8652                 /* Allow reads and writes to the
8653                  * APE register and memory space.
8654                  */
8655                 val = tr32(TG3PCI_PCISTATE);
8656                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8657                        PCISTATE_ALLOW_APE_SHMEM_WR |
8658                        PCISTATE_ALLOW_APE_PSPACE_WR;
8659                 tw32(TG3PCI_PCISTATE, val);
8660         }
8661
8662         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8663                 /* Enable some hw fixes.  */
8664                 val = tr32(TG3PCI_MSI_DATA);
8665                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8666                 tw32(TG3PCI_MSI_DATA, val);
8667         }
8668
8669         /* Descriptor ring init may make accesses to the
8670          * NIC SRAM area to set up the TX descriptors, so we
8671          * can only do this after the hardware has been
8672          * successfully reset.
8673          */
8674         err = tg3_init_rings(tp);
8675         if (err)
8676                 return err;
8677
8678         if (tg3_flag(tp, 57765_PLUS)) {
8679                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8680                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8681                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8682                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8683                 if (!tg3_flag(tp, 57765_CLASS) &&
8684                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8685                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8686                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8687         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8688                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8689                 /* This value is determined during the probe-time DMA
8690                  * engine test, tg3_test_dma.
8691                  */
8692                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8693         }
8694
8695         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8696                           GRC_MODE_4X_NIC_SEND_RINGS |
8697                           GRC_MODE_NO_TX_PHDR_CSUM |
8698                           GRC_MODE_NO_RX_PHDR_CSUM);
8699         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8700
8701         /* Pseudo-header checksum is done by hardware logic and not
8702          * the offload processors, so make the chip do the pseudo-
8703          * header checksums on receive.  For transmit it is more
8704          * convenient to do the pseudo-header checksum in software
8705          * as Linux does that on transmit for us in all cases.
8706          */
8707         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8708
8709         tw32(GRC_MODE,
8710              tp->grc_mode |
8711              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8712
8713         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
8714         val = tr32(GRC_MISC_CFG);
8715         val &= ~0xff;
8716         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8717         tw32(GRC_MISC_CFG, val);
8718
8719         /* Initialize MBUF/DESC pool. */
8720         if (tg3_flag(tp, 5750_PLUS)) {
8721                 /* Do nothing.  */
8722         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8723                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8724                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8725                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8726                 else
8727                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8728                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8729                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8730         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8731                 int fw_len;
8732
8733                 fw_len = tp->fw_len;
8734                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8735                 tw32(BUFMGR_MB_POOL_ADDR,
8736                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8737                 tw32(BUFMGR_MB_POOL_SIZE,
8738                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8739         }
8740
8741         if (tp->dev->mtu <= ETH_DATA_LEN) {
8742                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8743                      tp->bufmgr_config.mbuf_read_dma_low_water);
8744                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8745                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8746                 tw32(BUFMGR_MB_HIGH_WATER,
8747                      tp->bufmgr_config.mbuf_high_water);
8748         } else {
8749                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8750                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8751                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8752                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8753                 tw32(BUFMGR_MB_HIGH_WATER,
8754                      tp->bufmgr_config.mbuf_high_water_jumbo);
8755         }
8756         tw32(BUFMGR_DMA_LOW_WATER,
8757              tp->bufmgr_config.dma_low_water);
8758         tw32(BUFMGR_DMA_HIGH_WATER,
8759              tp->bufmgr_config.dma_high_water);
8760
8761         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8762         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8763                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8764         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8765             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8766             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8767                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8768         tw32(BUFMGR_MODE, val);
8769         for (i = 0; i < 2000; i++) {
8770                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8771                         break;
8772                 udelay(10);
8773         }
8774         if (i >= 2000) {
8775                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8776                 return -ENODEV;
8777         }
8778
8779         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8780                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8781
8782         tg3_setup_rxbd_thresholds(tp);
8783
8784         /* Initialize the TG3_BDINFO blocks at:
8785          *  RCVDBDI_STD_BD:     standard eth size rx ring
8786          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8787          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8788          *
8789          * like so:
8790          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8791          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8792          *                              ring attribute flags
8793          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8794          *
8795          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8796          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8797          *
8798          * The size of each ring is fixed in the firmware, but the location is
8799          * configurable.
8800          */
8801         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8802              ((u64) tpr->rx_std_mapping >> 32));
8803         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8804              ((u64) tpr->rx_std_mapping & 0xffffffff));
8805         if (!tg3_flag(tp, 5717_PLUS))
8806                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8807                      NIC_SRAM_RX_BUFFER_DESC);
8808
8809         /* Disable the mini ring */
8810         if (!tg3_flag(tp, 5705_PLUS))
8811                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8812                      BDINFO_FLAGS_DISABLED);
8813
8814         /* Program the jumbo buffer descriptor ring control
8815          * blocks on those devices that have them.
8816          */
8817         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8818             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8819
8820                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8821                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8822                              ((u64) tpr->rx_jmb_mapping >> 32));
8823                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8824                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8825                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8826                               BDINFO_FLAGS_MAXLEN_SHIFT;
8827                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8828                              val | BDINFO_FLAGS_USE_EXT_RECV);
8829                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8830                             tg3_flag(tp, 57765_CLASS))
8831                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8832                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8833                 } else {
8834                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8835                              BDINFO_FLAGS_DISABLED);
8836                 }
8837
8838                 if (tg3_flag(tp, 57765_PLUS)) {
8839                         val = TG3_RX_STD_RING_SIZE(tp);
8840                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8841                         val |= (TG3_RX_STD_DMA_SZ << 2);
8842                 } else
8843                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8844         } else
8845                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8846
8847         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8848
8849         tpr->rx_std_prod_idx = tp->rx_pending;
8850         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8851
8852         tpr->rx_jmb_prod_idx =
8853                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8854         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8855
8856         tg3_rings_reset(tp);
8857
8858         /* Initialize MAC address and backoff seed. */
8859         __tg3_set_mac_addr(tp, 0);
8860
8861         /* MTU + ethernet header + FCS + optional VLAN tag */
8862         tw32(MAC_RX_MTU_SIZE,
8863              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8864
8865         /* The slot time is changed by tg3_setup_phy if we
8866          * run at gigabit with half duplex.
8867          */
8868         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8869               (6 << TX_LENGTHS_IPG_SHIFT) |
8870               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8871
8872         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8873                 val |= tr32(MAC_TX_LENGTHS) &
8874                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8875                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8876
8877         tw32(MAC_TX_LENGTHS, val);
8878
8879         /* Receive rules. */
8880         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8881         tw32(RCVLPC_CONFIG, 0x0181);
8882
8883         /* Calculate the RDMAC_MODE setting early; we need it to determine
8884          * the RCVLPC_STATE_ENABLE mask.
8885          */
8886         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8887                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8888                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8889                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8890                       RDMAC_MODE_LNGREAD_ENAB);
8891
8892         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8893                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8894
8895         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8896             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8897             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8898                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8899                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8900                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8901
8902         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8903             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8904                 if (tg3_flag(tp, TSO_CAPABLE) &&
8905                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8906                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8907                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8908                            !tg3_flag(tp, IS_5788)) {
8909                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8910                 }
8911         }
8912
8913         if (tg3_flag(tp, PCI_EXPRESS))
8914                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8915
8916         if (tg3_flag(tp, HW_TSO_1) ||
8917             tg3_flag(tp, HW_TSO_2) ||
8918             tg3_flag(tp, HW_TSO_3))
8919                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8920
8921         if (tg3_flag(tp, 57765_PLUS) ||
8922             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8923             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8924                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8925
8926         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8927                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8928
8929         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8930             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8931             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8932             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8933             tg3_flag(tp, 57765_PLUS)) {
8934                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8935                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8936                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8937                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8938                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8939                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8940                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8941                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8942                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8943                 }
8944                 tw32(TG3_RDMA_RSRVCTRL_REG,
8945                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8946         }
8947
8948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8950                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8951                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8952                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8953                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8954         }
8955
8956         /* Receive/send statistics. */
8957         if (tg3_flag(tp, 5750_PLUS)) {
8958                 val = tr32(RCVLPC_STATS_ENABLE);
8959                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8960                 tw32(RCVLPC_STATS_ENABLE, val);
8961         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8962                    tg3_flag(tp, TSO_CAPABLE)) {
8963                 val = tr32(RCVLPC_STATS_ENABLE);
8964                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8965                 tw32(RCVLPC_STATS_ENABLE, val);
8966         } else {
8967                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8968         }
8969         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8970         tw32(SNDDATAI_STATSENAB, 0xffffff);
8971         tw32(SNDDATAI_STATSCTRL,
8972              (SNDDATAI_SCTRL_ENABLE |
8973               SNDDATAI_SCTRL_FASTUPD));
8974
8975         /* Setup host coalescing engine. */
8976         tw32(HOSTCC_MODE, 0);
8977         for (i = 0; i < 2000; i++) {
8978                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8979                         break;
8980                 udelay(10);
8981         }
8982
8983         __tg3_set_coalesce(tp, &tp->coal);
8984
8985         if (!tg3_flag(tp, 5705_PLUS)) {
8986                 /* Status/statistics block address.  See tg3_timer,
8987                  * the tg3_periodic_fetch_stats call there, and
8988                  * tg3_get_stats to see how this works for 5705/5750 chips.
8989                  */
8990                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8991                      ((u64) tp->stats_mapping >> 32));
8992                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8993                      ((u64) tp->stats_mapping & 0xffffffff));
8994                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8995
8996                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8997
8998                 /* Clear statistics and status block memory areas */
8999                 for (i = NIC_SRAM_STATS_BLK;
9000                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9001                      i += sizeof(u32)) {
9002                         tg3_write_mem(tp, i, 0);
9003                         udelay(40);
9004                 }
9005         }
9006
9007         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9008
9009         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9010         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9011         if (!tg3_flag(tp, 5705_PLUS))
9012                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9013
9014         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9015                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9016                 /* Reset to prevent intermittently losing the first RX packet. */
9017                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9018                 udelay(10);
9019         }
9020
9021         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9022                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9023                         MAC_MODE_FHDE_ENABLE;
9024         if (tg3_flag(tp, ENABLE_APE))
9025                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9026         if (!tg3_flag(tp, 5705_PLUS) &&
9027             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9028             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9029                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9030         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9031         udelay(40);
9032
9033         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9034          * If TG3_FLAG_IS_NIC is zero, we should read the
9035          * register to preserve the GPIO settings for LOMs. The GPIOs,
9036          * whether used as inputs or outputs, are set by boot code after
9037          * reset.
9038          */
9039         if (!tg3_flag(tp, IS_NIC)) {
9040                 u32 gpio_mask;
9041
9042                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9043                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9044                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9045
9046                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9047                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9048                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9049
9050                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9051                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9052
9053                 tp->grc_local_ctrl &= ~gpio_mask;
9054                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9055
9056                 /* GPIO1 must be driven high for EEPROM write-protect. */
9057                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9058                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9059                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9060         }
9061         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9062         udelay(100);
9063
9064         if (tg3_flag(tp, USING_MSIX)) {
9065                 val = tr32(MSGINT_MODE);
9066                 val |= MSGINT_MODE_ENABLE;
9067                 if (tp->irq_cnt > 1)
9068                         val |= MSGINT_MODE_MULTIVEC_EN;
9069                 if (!tg3_flag(tp, 1SHOT_MSI))
9070                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9071                 tw32(MSGINT_MODE, val);
9072         }
9073
9074         if (!tg3_flag(tp, 5705_PLUS)) {
9075                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9076                 udelay(40);
9077         }
9078
9079         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9080                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9081                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9082                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9083                WDMAC_MODE_LNGREAD_ENAB);
9084
9085         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9086             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9087                 if (tg3_flag(tp, TSO_CAPABLE) &&
9088                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9089                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9090                         /* nothing */
9091                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9092                            !tg3_flag(tp, IS_5788)) {
9093                         val |= WDMAC_MODE_RX_ACCEL;
9094                 }
9095         }
9096
9097         /* Enable host coalescing bug fix */
9098         if (tg3_flag(tp, 5755_PLUS))
9099                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9100
9101         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9102                 val |= WDMAC_MODE_BURST_ALL_DATA;
9103
9104         tw32_f(WDMAC_MODE, val);
9105         udelay(40);
9106
9107         if (tg3_flag(tp, PCIX_MODE)) {
9108                 u16 pcix_cmd;
9109
9110                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9111                                      &pcix_cmd);
9112                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9113                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9114                         pcix_cmd |= PCI_X_CMD_READ_2K;
9115                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9116                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9117                         pcix_cmd |= PCI_X_CMD_READ_2K;
9118                 }
9119                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9120                                       pcix_cmd);
9121         }
9122
9123         tw32_f(RDMAC_MODE, rdmac_mode);
9124         udelay(40);
9125
9126         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9127         if (!tg3_flag(tp, 5705_PLUS))
9128                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9129
9130         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9131                 tw32(SNDDATAC_MODE,
9132                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9133         else
9134                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9135
9136         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9137         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9138         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9139         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9140                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9141         tw32(RCVDBDI_MODE, val);
9142         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9143         if (tg3_flag(tp, HW_TSO_1) ||
9144             tg3_flag(tp, HW_TSO_2) ||
9145             tg3_flag(tp, HW_TSO_3))
9146                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9147         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9148         if (tg3_flag(tp, ENABLE_TSS))
9149                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9150         tw32(SNDBDI_MODE, val);
9151         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9152
9153         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9154                 err = tg3_load_5701_a0_firmware_fix(tp);
9155                 if (err)
9156                         return err;
9157         }
9158
9159         if (tg3_flag(tp, TSO_CAPABLE)) {
9160                 err = tg3_load_tso_firmware(tp);
9161                 if (err)
9162                         return err;
9163         }
9164
9165         tp->tx_mode = TX_MODE_ENABLE;
9166
9167         if (tg3_flag(tp, 5755_PLUS) ||
9168             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9169                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9170
9171         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9172                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9173                 tp->tx_mode &= ~val;
9174                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9175         }
9176
9177         tw32_f(MAC_TX_MODE, tp->tx_mode);
9178         udelay(100);
9179
9180         if (tg3_flag(tp, ENABLE_RSS)) {
9181                 tg3_rss_write_indir_tbl(tp);
9182
9183                 /* Set up the "secret" RSS hash key. */
9184                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9185                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9186                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9187                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9188                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9189                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9190                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9191                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9192                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9193                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9194         }
9195
9196         tp->rx_mode = RX_MODE_ENABLE;
9197         if (tg3_flag(tp, 5755_PLUS))
9198                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9199
9200         if (tg3_flag(tp, ENABLE_RSS))
9201                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9202                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9203                                RX_MODE_RSS_IPV6_HASH_EN |
9204                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9205                                RX_MODE_RSS_IPV4_HASH_EN |
9206                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9207
9208         tw32_f(MAC_RX_MODE, tp->rx_mode);
9209         udelay(10);
9210
9211         tw32(MAC_LED_CTRL, tp->led_ctrl);
9212
9213         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9214         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9215                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9216                 udelay(10);
9217         }
9218         tw32_f(MAC_RX_MODE, tp->rx_mode);
9219         udelay(10);
9220
9221         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9222                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9223                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9224                         /* Set the drive transmission level to 1.2V, but
9225                          * only if the signal pre-emphasis bit is not set. */
9226                         val = tr32(MAC_SERDES_CFG);
9227                         val &= 0xfffff000;
9228                         val |= 0x880;
9229                         tw32(MAC_SERDES_CFG, val);
9230                 }
9231                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9232                         tw32(MAC_SERDES_CFG, 0x616000);
9233         }
9234
9235         /* Prevent chip from dropping frames when flow control
9236          * is enabled.
9237          */
9238         if (tg3_flag(tp, 57765_CLASS))
9239                 val = 1;
9240         else
9241                 val = 2;
9242         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9243
9244         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9245             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9246                 /* Use hardware link auto-negotiation */
9247                 tg3_flag_set(tp, HW_AUTONEG);
9248         }
9249
9250         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9251             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9252                 u32 tmp;
9253
9254                 tmp = tr32(SERDES_RX_CTRL);
9255                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9256                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9257                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9258                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9259         }
9260
9261         if (!tg3_flag(tp, USE_PHYLIB)) {
9262                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9263                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9264
9265                 err = tg3_setup_phy(tp, 0);
9266                 if (err)
9267                         return err;
9268
9269                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9270                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9271                         u32 tmp;
9272
9273                         /* Clear CRC stats. */
9274                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9275                                 tg3_writephy(tp, MII_TG3_TEST1,
9276                                              tmp | MII_TG3_TEST1_CRC_EN);
9277                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9278                         }
9279                 }
9280         }
9281
9282         __tg3_set_rx_mode(tp->dev);
9283
9284         /* Initialize receive rules. */
9285         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9286         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9287         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9288         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9289
9290         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9291                 limit = 8;
9292         else
9293                 limit = 16;
9294         if (tg3_flag(tp, ENABLE_ASF))
9295                 limit -= 4;
9296         switch (limit) {
9297         case 16:
9298                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9299         case 15:
9300                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9301         case 14:
9302                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9303         case 13:
9304                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9305         case 12:
9306                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9307         case 11:
9308                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9309         case 10:
9310                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9311         case 9:
9312                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9313         case 8:
9314                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9315         case 7:
9316                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9317         case 6:
9318                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9319         case 5:
9320                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9321         case 4:
9322                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9323         case 3:
9324                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9325         case 2:
9326         case 1:
9327
9328         default:
9329                 break;
9330         }
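
/* Note: the fall-through in the switch above is intentional.  Entering
 * at `limit' zeroes every rule/value pair from limit - 1 down to 4, so
 * unused rules never match.  Rules 0 and 1 are programmed explicitly
 * above; rules 2 and 3 are deliberately left alone (see the
 * commented-out cases).
 */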
9331
9332         if (tg3_flag(tp, ENABLE_APE))
9333                 /* Write our heartbeat update interval (disabled) to APE. */
9334                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9335                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9336
9337         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9338
9339         return 0;
9340 }
9341
9342 /* Called at device open time to get the chip ready for
9343  * packet processing.  Invoked with tp->lock held.
9344  */
9345 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9346 {
9347         tg3_switch_clocks(tp);
9348
9349         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9350
9351         return tg3_reset_hw(tp, reset_phy);
9352 }
9353
9354 #define TG3_STAT_ADD32(PSTAT, REG) \
9355 do {    u32 __val = tr32(REG); \
9356         (PSTAT)->low += __val; \
9357         if ((PSTAT)->low < __val) \
9358                 (PSTAT)->high += 1; \
9359 } while (0)
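
/* The macro above folds a 32-bit hardware counter into a 64-bit
 * high/low software accumulator.  The carry test is pure unsigned
 * arithmetic: if adding __val wrapped the low word, the new low word
 * is necessarily smaller than __val.  Illustrative trace (hypothetical
 * values):
 *
 *	low = 0xfffffff0, __val = 0x20
 *	low += __val    ->  low == 0x00000010
 *	0x10 < 0x20     ->  high += 1
 */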
9360
9361 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9362 {
9363         struct tg3_hw_stats *sp = tp->hw_stats;
9364
9365         if (!netif_carrier_ok(tp->dev))
9366                 return;
9367
9368         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9369         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9370         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9371         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9372         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9373         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9374         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9375         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9376         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9377         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9378         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9379         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9380         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9381
9382         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9383         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9384         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9385         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9386         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9387         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9388         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9389         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9390         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9391         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9392         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9393         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9394         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9395         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9396
9397         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9398         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9399             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9400             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9401                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9402         } else {
9403                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9404                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9405                 if (val) {
9406                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9407                         sp->rx_discards.low += val;
9408                         if (sp->rx_discards.low < val)
9409                                 sp->rx_discards.high += 1;
9410                 }
9411                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9412         }
9413         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9414 }
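
/* Note the fallback above: on 5717 and the A0 steppings of 5719/5720,
 * rx_discards is derived from the HOSTCC_FLOW_ATTN_MBUF_LWM attention
 * bit rather than RCVLPC_IN_DISCARDS_CNT.  Each poll that finds the
 * bit set clears it and bumps the counter by one, which can undercount
 * bursts but preserves the trend; the same value doubles as
 * mbuf_lwm_thresh_hit.
 */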
9415
9416 static void tg3_chk_missed_msi(struct tg3 *tp)
9417 {
9418         u32 i;
9419
9420         for (i = 0; i < tp->irq_cnt; i++) {
9421                 struct tg3_napi *tnapi = &tp->napi[i];
9422
9423                 if (tg3_has_work(tnapi)) {
9424                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9425                             tnapi->last_tx_cons == tnapi->tx_cons) {
9426                                 if (tnapi->chk_msi_cnt < 1) {
9427                                         tnapi->chk_msi_cnt++;
9428                                         return;
9429                                 }
9430                                 tg3_msi(0, tnapi);
9431                         }
9432                 }
9433                 tnapi->chk_msi_cnt = 0;
9434                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9435                 tnapi->last_tx_cons = tnapi->tx_cons;
9436         }
9437 }
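
/* Workaround sketch: some chips can lose an MSI, so the driver timer
 * calls the check above.  If a vector has work pending but its RX/TX
 * consumer indices have not moved since the previous tick, one grace
 * tick is allowed (chk_msi_cnt) before tg3_msi() is invoked by hand to
 * service the ring as if the lost interrupt had arrived.
 */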
9438
9439 static void tg3_timer(unsigned long __opaque)
9440 {
9441         struct tg3 *tp = (struct tg3 *) __opaque;
9442
9443         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9444                 goto restart_timer;
9445
9446         spin_lock(&tp->lock);
9447
9448         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9449             tg3_flag(tp, 57765_CLASS))
9450                 tg3_chk_missed_msi(tp);
9451
9452         if (!tg3_flag(tp, TAGGED_STATUS)) {
9453                 /* All of this garbage is because, when using non-tagged
9454                  * IRQ status, the mailbox/status_block protocol the chip
9455                  * uses with the CPU is race prone.
9456                  */
9457                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9458                         tw32(GRC_LOCAL_CTRL,
9459                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9460                 } else {
9461                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9462                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9463                 }
9464
9465                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9466                         spin_unlock(&tp->lock);
9467                         tg3_reset_task_schedule(tp);
9468                         goto restart_timer;
9469                 }
9470         }
9471
9472         /* This part only runs once per second. */
9473         if (!--tp->timer_counter) {
9474                 if (tg3_flag(tp, 5705_PLUS))
9475                         tg3_periodic_fetch_stats(tp);
9476
9477                 if (tp->setlpicnt && !--tp->setlpicnt)
9478                         tg3_phy_eee_enable(tp);
9479
9480                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9481                         u32 mac_stat;
9482                         int phy_event;
9483
9484                         mac_stat = tr32(MAC_STATUS);
9485
9486                         phy_event = 0;
9487                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9488                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9489                                         phy_event = 1;
9490                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9491                                 phy_event = 1;
9492
9493                         if (phy_event)
9494                                 tg3_setup_phy(tp, 0);
9495                 } else if (tg3_flag(tp, POLL_SERDES)) {
9496                         u32 mac_stat = tr32(MAC_STATUS);
9497                         int need_setup = 0;
9498
9499                         if (netif_carrier_ok(tp->dev) &&
9500                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9501                                 need_setup = 1;
9502                         }
9503                         if (!netif_carrier_ok(tp->dev) &&
9504                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9505                                          MAC_STATUS_SIGNAL_DET))) {
9506                                 need_setup = 1;
9507                         }
9508                         if (need_setup) {
9509                                 if (!tp->serdes_counter) {
9510                                         tw32_f(MAC_MODE,
9511                                              (tp->mac_mode &
9512                                               ~MAC_MODE_PORT_MODE_MASK));
9513                                         udelay(40);
9514                                         tw32_f(MAC_MODE, tp->mac_mode);
9515                                         udelay(40);
9516                                 }
9517                                 tg3_setup_phy(tp, 0);
9518                         }
9519                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9520                            tg3_flag(tp, 5780_CLASS)) {
9521                         tg3_serdes_parallel_detect(tp);
9522                 }
9523
9524                 tp->timer_counter = tp->timer_multiplier;
9525         }
9526
9527         /* Heartbeat is only sent once every 2 seconds.
9528          *
9529          * The heartbeat is to tell the ASF firmware that the host
9530          * driver is still alive.  In the event that the OS crashes,
9531          * ASF needs to reset the hardware to free up the FIFO space
9532          * that may be filled with rx packets destined for the host.
9533          * If the FIFO is full, ASF will no longer function properly.
9534          *
9535          * Unintended resets have been reported on real-time kernels
9536          * where the timer doesn't run on time.  Netpoll will also have
9537          * the same problem.
9538          *
9539          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9540          * to check the ring condition when the heartbeat is expiring
9541          * before doing the reset.  This will prevent most unintended
9542          * resets.
9543          */
9544         if (!--tp->asf_counter) {
9545                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9546                         tg3_wait_for_event_ack(tp);
9547
9548                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9549                                       FWCMD_NICDRV_ALIVE3);
9550                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9551                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9552                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9553
9554                         tg3_generate_fw_event(tp);
9555                 }
9556                 tp->asf_counter = tp->asf_multiplier;
9557         }
9558
9559         spin_unlock(&tp->lock);
9560
9561 restart_timer:
9562         tp->timer.expires = jiffies + tp->timer_offset;
9563         add_timer(&tp->timer);
9564 }
9565
9566 static void __devinit tg3_timer_init(struct tg3 *tp)
9567 {
9568         if (tg3_flag(tp, TAGGED_STATUS) &&
9569             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9570             !tg3_flag(tp, 57765_CLASS))
9571                 tp->timer_offset = HZ;
9572         else
9573                 tp->timer_offset = HZ / 10;
9574
9575         BUG_ON(tp->timer_offset > HZ);
9576
9577         tp->timer_multiplier = (HZ / tp->timer_offset);
9578         tp->asf_multiplier = (HZ / tp->timer_offset) *
9579                              TG3_FW_UPDATE_FREQ_SEC;
9580
9581         init_timer(&tp->timer);
9582         tp->timer.data = (unsigned long) tp;
9583         tp->timer.function = tg3_timer;
9584 }
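
/* Cadence sketch: the timer fires every timer_offset jiffies -- one
 * second with tagged status (except on 5717 and 57765-class parts),
 * otherwise 100ms.  timer_multiplier scales the fast tick back to the
 * once-per-second work in tg3_timer(), and asf_multiplier stretches it
 * further to one ASF heartbeat every TG3_FW_UPDATE_FREQ_SEC seconds.
 */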
9585
9586 static void tg3_timer_start(struct tg3 *tp)
9587 {
9588         tp->asf_counter   = tp->asf_multiplier;
9589         tp->timer_counter = tp->timer_multiplier;
9590
9591         tp->timer.expires = jiffies + tp->timer_offset;
9592         add_timer(&tp->timer);
9593 }
9594
9595 static void tg3_timer_stop(struct tg3 *tp)
9596 {
9597         del_timer_sync(&tp->timer);
9598 }
9599
9600 /* Restart hardware after configuration changes, self-test, etc.
9601  * Invoked with tp->lock held.
9602  */
9603 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9604         __releases(tp->lock)
9605         __acquires(tp->lock)
9606 {
9607         int err;
9608
9609         err = tg3_init_hw(tp, reset_phy);
9610         if (err) {
9611                 netdev_err(tp->dev,
9612                            "Failed to re-initialize device, aborting\n");
9613                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9614                 tg3_full_unlock(tp);
9615                 tg3_timer_stop(tp);
9616                 tp->irq_sync = 0;
9617                 tg3_napi_enable(tp);
9618                 dev_close(tp->dev);
9619                 tg3_full_lock(tp, 0);
9620         }
9621         return err;
9622 }
9623
9624 static void tg3_reset_task(struct work_struct *work)
9625 {
9626         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9627         int err;
9628
9629         tg3_full_lock(tp, 0);
9630
9631         if (!netif_running(tp->dev)) {
9632                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9633                 tg3_full_unlock(tp);
9634                 return;
9635         }
9636
9637         tg3_full_unlock(tp);
9638
9639         tg3_phy_stop(tp);
9640
9641         tg3_netif_stop(tp);
9642
9643         tg3_full_lock(tp, 1);
9644
9645         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9646                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9647                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9648                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9649                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9650         }
9651
9652         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9653         err = tg3_init_hw(tp, 1);
9654         if (err)
9655                 goto out;
9656
9657         tg3_netif_start(tp);
9658
9659 out:
9660         tg3_full_unlock(tp);
9661
9662         if (!err)
9663                 tg3_phy_start(tp);
9664
9665         tg3_flag_clear(tp, RESET_TASK_PENDING);
9666 }
9667
9668 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9669 {
9670         irq_handler_t fn;
9671         unsigned long flags;
9672         char *name;
9673         struct tg3_napi *tnapi = &tp->napi[irq_num];
9674
9675         if (tp->irq_cnt == 1)
9676                 name = tp->dev->name;
9677         else {
9678                 name = &tnapi->irq_lbl[0];
9679                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9680                 name[IFNAMSIZ-1] = 0;
9681         }
9682
9683         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9684                 fn = tg3_msi;
9685                 if (tg3_flag(tp, 1SHOT_MSI))
9686                         fn = tg3_msi_1shot;
9687                 flags = 0;
9688         } else {
9689                 fn = tg3_interrupt;
9690                 if (tg3_flag(tp, TAGGED_STATUS))
9691                         fn = tg3_interrupt_tagged;
9692                 flags = IRQF_SHARED;
9693         }
9694
9695         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9696 }
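
/* Handler selection above, summarized:
 *
 *	MSI/MSI-X, 1SHOT_MSI   -> tg3_msi_1shot
 *	MSI/MSI-X              -> tg3_msi
 *	INTx, TAGGED_STATUS    -> tg3_interrupt_tagged
 *	INTx                   -> tg3_interrupt
 *
 * Message-signalled vectors are never shared, so only the legacy INTx
 * path requests the line with IRQF_SHARED.
 */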
9697
9698 static int tg3_test_interrupt(struct tg3 *tp)
9699 {
9700         struct tg3_napi *tnapi = &tp->napi[0];
9701         struct net_device *dev = tp->dev;
9702         int err, i, intr_ok = 0;
9703         u32 val;
9704
9705         if (!netif_running(dev))
9706                 return -ENODEV;
9707
9708         tg3_disable_ints(tp);
9709
9710         free_irq(tnapi->irq_vec, tnapi);
9711
9712         /*
9713          * Turn off MSI one-shot mode.  Otherwise this test has no
9714          * way to observe whether the interrupt was delivered.
9715          */
9716         if (tg3_flag(tp, 57765_PLUS)) {
9717                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9718                 tw32(MSGINT_MODE, val);
9719         }
9720
9721         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9722                           IRQF_SHARED, dev->name, tnapi);
9723         if (err)
9724                 return err;
9725
9726         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9727         tg3_enable_ints(tp);
9728
9729         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9730                tnapi->coal_now);
9731
9732         for (i = 0; i < 5; i++) {
9733                 u32 int_mbox, misc_host_ctrl;
9734
9735                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9736                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9737
9738                 if ((int_mbox != 0) ||
9739                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9740                         intr_ok = 1;
9741                         break;
9742                 }
9743
9744                 if (tg3_flag(tp, 57765_PLUS) &&
9745                     tnapi->hw_status->status_tag != tnapi->last_tag)
9746                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9747
9748                 msleep(10);
9749         }
9750
9751         tg3_disable_ints(tp);
9752
9753         free_irq(tnapi->irq_vec, tnapi);
9754
9755         err = tg3_request_irq(tp, 0);
9756
9757         if (err)
9758                 return err;
9759
9760         if (intr_ok) {
9761                 /* Re-enable MSI one-shot mode. */
9762                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9763                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9764                         tw32(MSGINT_MODE, val);
9765                 }
9766                 return 0;
9767         }
9768
9769         return -EIO;
9770 }
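/* The test above, step by step: free the production IRQ handler, defeat
 * one-shot MSI (so delivery leaves visible state), install tg3_test_isr,
 * force an interrupt via HOSTCC_MODE, then poll the interrupt mailbox and
 * MISC_HOST_CTRL for up to ~50 ms (5 x msleep(10)) before restoring the
 * original handler, re-enabling one-shot mode only if the test passed.
 */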
9771
9772 /* Returns 0 if the MSI test succeeds, or if the test fails but INTx
9773  * mode is successfully restored.
9774  */
9775 static int tg3_test_msi(struct tg3 *tp)
9776 {
9777         int err;
9778         u16 pci_cmd;
9779
9780         if (!tg3_flag(tp, USING_MSI))
9781                 return 0;
9782
9783         /* Turn off SERR reporting in case MSI terminates with Master
9784          * Abort.
9785          */
9786         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9787         pci_write_config_word(tp->pdev, PCI_COMMAND,
9788                               pci_cmd & ~PCI_COMMAND_SERR);
9789
9790         err = tg3_test_interrupt(tp);
9791
9792         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9793
9794         if (!err)
9795                 return 0;
9796
9797         /* other failures */
9798         if (err != -EIO)
9799                 return err;
9800
9801         /* MSI test failed, go back to INTx mode */
9802         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9803                     "to INTx mode. Please report this failure to the PCI "
9804                     "maintainer and include system chipset information\n");
9805
9806         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9807
9808         pci_disable_msi(tp->pdev);
9809
9810         tg3_flag_clear(tp, USING_MSI);
9811         tp->napi[0].irq_vec = tp->pdev->irq;
9812
9813         err = tg3_request_irq(tp, 0);
9814         if (err)
9815                 return err;
9816
9817         /* Need to reset the chip because the MSI cycle may have terminated
9818          * with Master Abort.
9819          */
9820         tg3_full_lock(tp, 1);
9821
9822         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9823         err = tg3_init_hw(tp, 1);
9824
9825         tg3_full_unlock(tp);
9826
9827         if (err)
9828                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9829
9830         return err;
9831 }
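/* Minimal sketch of the SERR-masking pattern used above (the helper name
 * is assumed): save PCI_COMMAND, clear the SERR enable bit around an
 * operation that may terminate with Master Abort, then restore it.
 */
#if 0
static void example_serr_masked_op(struct pci_dev *pdev)
{
	u16 cmd;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(pdev, PCI_COMMAND, cmd & ~PCI_COMMAND_SERR);
	/* ... operation that may Master-Abort ... */
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
}
#endif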
9832
9833 static int tg3_request_firmware(struct tg3 *tp)
9834 {
9835         const __be32 *fw_data;
9836
9837         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9838                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9839                            tp->fw_needed);
9840                 return -ENOENT;
9841         }
9842
9843         fw_data = (void *)tp->fw->data;
9844
9845         /* Firmware blob starts with version numbers, followed by
9846          * start address and _full_ length including BSS sections
9847          * (which must be longer than the actual data, of course).
9848          */
9849
9850         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9851         if (tp->fw_len < (tp->fw->size - 12)) {
9852                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9853                            tp->fw_len, tp->fw_needed);
9854                 release_firmware(tp->fw);
9855                 tp->fw = NULL;
9856                 return -EINVAL;
9857         }
9858
9859         /* We no longer need firmware; we have it. */
9860         tp->fw_needed = NULL;
9861         return 0;
9862 }
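/* Assumed shape of the firmware blob consumed above -- three big-endian
 * words ahead of the payload; the struct and field names are illustrative
 * only and do not appear in tg3.h.
 */
#if 0
struct example_tg3_fw_hdr {
	__be32 version;		/* fw_data[0] */
	__be32 base_addr;	/* fw_data[1]: load address */
	__be32 len;		/* fw_data[2]: full length, including BSS */
};
/* The payload follows: tp->fw->size - 12 bytes of data, which is why the
 * sanity check above compares fw_len against fw->size - 12.
 */
#endif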
9863
9864 static bool tg3_enable_msix(struct tg3 *tp)
9865 {
9866         int i, rc;
9867         struct msix_entry msix_ent[tp->irq_max];
9868
9869         tp->irq_cnt = num_online_cpus();
9870         if (tp->irq_cnt > 1) {
9871                 /* We want as many RX rings enabled as there are CPUs.
9872                  * In multiqueue MSI-X mode, the first MSI-X vector
9873                  * only deals with link interrupts, etc., so we add
9874                  * one to the number of vectors we are requesting.
9875                  */
9876                 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9877         }
9878
9879         for (i = 0; i < tp->irq_max; i++) {
9880                 msix_ent[i].entry  = i;
9881                 msix_ent[i].vector = 0;
9882         }
9883
9884         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9885         if (rc < 0) {
9886                 return false;
9887         } else if (rc != 0) {
9888                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9889                         return false;
9890                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9891                               tp->irq_cnt, rc);
9892                 tp->irq_cnt = rc;
9893         }
9894
9895         for (i = 0; i < tp->irq_max; i++)
9896                 tp->napi[i].irq_vec = msix_ent[i].vector;
9897
9898         netif_set_real_num_tx_queues(tp->dev, 1);
9899         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9900         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9901                 pci_disable_msix(tp->pdev);
9902                 return false;
9903         }
9904
9905         if (tp->irq_cnt > 1) {
9906                 tg3_flag_set(tp, ENABLE_RSS);
9907
9908                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9909                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9910                         tg3_flag_set(tp, ENABLE_TSS);
9911                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9912                 }
9913         }
9914
9915         return true;
9916 }
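/* Worked example for the vector arithmetic above (assumed 4 online CPUs,
 * irq_max >= 5): irq_cnt becomes min(4 + 1, irq_max) = 5 -- vector 0 for
 * link/misc interrupts plus four RX-ring vectors -- so real_num_rx_queues
 * is set to irq_cnt - 1 = 4.  If the host grants fewer vectors,
 * pci_enable_msix() returns the available count and the request is
 * retried at that size.
 */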
9917
9918 static void tg3_ints_init(struct tg3 *tp)
9919 {
9920         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9921             !tg3_flag(tp, TAGGED_STATUS)) {
9922                 /* All MSI supporting chips should support tagged
9923                  * status.  Assert that this is the case.
9924                  */
9925                 netdev_warn(tp->dev,
9926                             "MSI without TAGGED_STATUS? Not using MSI\n");
9927                 goto defcfg;
9928         }
9929
9930         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9931                 tg3_flag_set(tp, USING_MSIX);
9932         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9933                 tg3_flag_set(tp, USING_MSI);
9934
9935         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9936                 u32 msi_mode = tr32(MSGINT_MODE);
9937                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9938                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9939                 if (!tg3_flag(tp, 1SHOT_MSI))
9940                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9941                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9942         }
9943 defcfg:
9944         if (!tg3_flag(tp, USING_MSIX)) {
9945                 tp->irq_cnt = 1;
9946                 tp->napi[0].irq_vec = tp->pdev->irq;
9947                 netif_set_real_num_tx_queues(tp->dev, 1);
9948                 netif_set_real_num_rx_queues(tp->dev, 1);
9949         }
9950 }
9951
9952 static void tg3_ints_fini(struct tg3 *tp)
9953 {
9954         if (tg3_flag(tp, USING_MSIX))
9955                 pci_disable_msix(tp->pdev);
9956         else if (tg3_flag(tp, USING_MSI))
9957                 pci_disable_msi(tp->pdev);
9958         tg3_flag_clear(tp, USING_MSI);
9959         tg3_flag_clear(tp, USING_MSIX);
9960         tg3_flag_clear(tp, ENABLE_RSS);
9961         tg3_flag_clear(tp, ENABLE_TSS);
9962 }
9963
9964 static int tg3_open(struct net_device *dev)
9965 {
9966         struct tg3 *tp = netdev_priv(dev);
9967         int i, err;
9968
9969         if (tp->fw_needed) {
9970                 err = tg3_request_firmware(tp);
9971                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9972                         if (err)
9973                                 return err;
9974                 } else if (err) {
9975                         netdev_warn(tp->dev, "TSO capability disabled\n");
9976                         tg3_flag_clear(tp, TSO_CAPABLE);
9977                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9978                         netdev_notice(tp->dev, "TSO capability restored\n");
9979                         tg3_flag_set(tp, TSO_CAPABLE);
9980                 }
9981         }
9982
9983         netif_carrier_off(tp->dev);
9984
9985         err = tg3_power_up(tp);
9986         if (err)
9987                 return err;
9988
9989         tg3_full_lock(tp, 0);
9990
9991         tg3_disable_ints(tp);
9992         tg3_flag_clear(tp, INIT_COMPLETE);
9993
9994         tg3_full_unlock(tp);
9995
9996         /*
9997          * Set up interrupts first so we know how
9998          * many NAPI resources to allocate
9999          */
10000         tg3_ints_init(tp);
10001
10002         tg3_rss_check_indir_tbl(tp);
10003
10004         /* The placement of this call is tied
10005          * to the setup and use of Host TX descriptors.
10006          */
10007         err = tg3_alloc_consistent(tp);
10008         if (err)
10009                 goto err_out1;
10010
10011         tg3_napi_init(tp);
10012
10013         tg3_napi_enable(tp);
10014
10015         for (i = 0; i < tp->irq_cnt; i++) {
10016                 struct tg3_napi *tnapi = &tp->napi[i];
10017                 err = tg3_request_irq(tp, i);
10018                 if (err) {
10019                         for (i--; i >= 0; i--) {
10020                                 tnapi = &tp->napi[i];
10021                                 free_irq(tnapi->irq_vec, tnapi);
10022                         }
10023                         goto err_out2;
10024                 }
10025         }
10026
10027         tg3_full_lock(tp, 0);
10028
10029         err = tg3_init_hw(tp, 1);
10030         if (err) {
10031                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10032                 tg3_free_rings(tp);
10033         }
10034
10035         tg3_full_unlock(tp);
10036
10037         if (err)
10038                 goto err_out3;
10039
10040         if (tg3_flag(tp, USING_MSI)) {
10041                 err = tg3_test_msi(tp);
10042
10043                 if (err) {
10044                         tg3_full_lock(tp, 0);
10045                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10046                         tg3_free_rings(tp);
10047                         tg3_full_unlock(tp);
10048
10049                         goto err_out2;
10050                 }
10051
10052                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10053                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10054
10055                         tw32(PCIE_TRANSACTION_CFG,
10056                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10057                 }
10058         }
10059
10060         tg3_phy_start(tp);
10061
10062         tg3_full_lock(tp, 0);
10063
10064         tg3_timer_start(tp);
10065         tg3_flag_set(tp, INIT_COMPLETE);
10066         tg3_enable_ints(tp);
10067
10068         tg3_full_unlock(tp);
10069
10070         netif_tx_start_all_queues(dev);
10071
10072         /*
10073          * Reset the loopback feature if it was turned on while the device
10074          * was down; make sure it is configured properly now.
10075          */
10076         if (dev->features & NETIF_F_LOOPBACK)
10077                 tg3_set_loopback(dev, dev->features);
10078
10079         return 0;
10080
10081 err_out3:
10082         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10083                 struct tg3_napi *tnapi = &tp->napi[i];
10084                 free_irq(tnapi->irq_vec, tnapi);
10085         }
10086
10087 err_out2:
10088         tg3_napi_disable(tp);
10089         tg3_napi_fini(tp);
10090         tg3_free_consistent(tp);
10091
10092 err_out1:
10093         tg3_ints_fini(tp);
10094         tg3_frob_aux_power(tp, false);
10095         pci_set_power_state(tp->pdev, PCI_D3hot);
10096         return err;
10097 }
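/* tg3_open() above, in order: load firmware if needed, power up, run
 * tg3_ints_init() first (so the allocations below know the vector count),
 * allocate rings and NAPI contexts, request one IRQ per vector, init the
 * hardware under tp->lock, verify MSI delivery (tg3_test_msi() itself
 * falls back to INTx when the test fails), start the PHY and timer,
 * enable interrupts, and finally wake the TX queues.
 */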
10098
10099 static int tg3_close(struct net_device *dev)
10100 {
10101         int i;
10102         struct tg3 *tp = netdev_priv(dev);
10103
10104         tg3_napi_disable(tp);
10105         tg3_reset_task_cancel(tp);
10106
10107         netif_tx_stop_all_queues(dev);
10108
10109         tg3_timer_stop(tp);
10110
10111         tg3_phy_stop(tp);
10112
10113         tg3_full_lock(tp, 1);
10114
10115         tg3_disable_ints(tp);
10116
10117         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10118         tg3_free_rings(tp);
10119         tg3_flag_clear(tp, INIT_COMPLETE);
10120
10121         tg3_full_unlock(tp);
10122
10123         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10124                 struct tg3_napi *tnapi = &tp->napi[i];
10125                 free_irq(tnapi->irq_vec, tnapi);
10126         }
10127
10128         tg3_ints_fini(tp);
10129
10130         /* Clear stats across close / open calls */
10131         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10132         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10133
10134         tg3_napi_fini(tp);
10135
10136         tg3_free_consistent(tp);
10137
10138         tg3_power_down(tp);
10139
10140         netif_carrier_off(tp->dev);
10141
10142         return 0;
10143 }
10144
10145 static inline u64 get_stat64(tg3_stat64_t *val)
10146 {
10147         return ((u64)val->high << 32) | ((u64)val->low);
10148 }
10149
10150 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10151 {
10152         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10153
10154         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10155             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10156              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10157                 u32 val;
10158
10159                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10160                         tg3_writephy(tp, MII_TG3_TEST1,
10161                                      val | MII_TG3_TEST1_CRC_EN);
10162                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10163                 } else
10164                         val = 0;
10165
10166                 tp->phy_crc_errors += val;
10167
10168                 return tp->phy_crc_errors;
10169         }
10170
10171         return get_stat64(&hw_stats->rx_fcs_errors);
10172 }
10173
10174 #define ESTAT_ADD(member) \
10175         estats->member =        old_estats->member + \
10176                                 get_stat64(&hw_stats->member)
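/* Expansion example: ESTAT_ADD(rx_octets) becomes
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each reported counter is the snapshot saved in estats_prev before
 * the last hardware reset plus the live hardware counter, so totals
 * survive chip resets that clear the statistics block.
 */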
10177
10178 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10179 {
10180         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10181         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10182
10183         ESTAT_ADD(rx_octets);
10184         ESTAT_ADD(rx_fragments);
10185         ESTAT_ADD(rx_ucast_packets);
10186         ESTAT_ADD(rx_mcast_packets);
10187         ESTAT_ADD(rx_bcast_packets);
10188         ESTAT_ADD(rx_fcs_errors);
10189         ESTAT_ADD(rx_align_errors);
10190         ESTAT_ADD(rx_xon_pause_rcvd);
10191         ESTAT_ADD(rx_xoff_pause_rcvd);
10192         ESTAT_ADD(rx_mac_ctrl_rcvd);
10193         ESTAT_ADD(rx_xoff_entered);
10194         ESTAT_ADD(rx_frame_too_long_errors);
10195         ESTAT_ADD(rx_jabbers);
10196         ESTAT_ADD(rx_undersize_packets);
10197         ESTAT_ADD(rx_in_length_errors);
10198         ESTAT_ADD(rx_out_length_errors);
10199         ESTAT_ADD(rx_64_or_less_octet_packets);
10200         ESTAT_ADD(rx_65_to_127_octet_packets);
10201         ESTAT_ADD(rx_128_to_255_octet_packets);
10202         ESTAT_ADD(rx_256_to_511_octet_packets);
10203         ESTAT_ADD(rx_512_to_1023_octet_packets);
10204         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10205         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10206         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10207         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10208         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10209
10210         ESTAT_ADD(tx_octets);
10211         ESTAT_ADD(tx_collisions);
10212         ESTAT_ADD(tx_xon_sent);
10213         ESTAT_ADD(tx_xoff_sent);
10214         ESTAT_ADD(tx_flow_control);
10215         ESTAT_ADD(tx_mac_errors);
10216         ESTAT_ADD(tx_single_collisions);
10217         ESTAT_ADD(tx_mult_collisions);
10218         ESTAT_ADD(tx_deferred);
10219         ESTAT_ADD(tx_excessive_collisions);
10220         ESTAT_ADD(tx_late_collisions);
10221         ESTAT_ADD(tx_collide_2times);
10222         ESTAT_ADD(tx_collide_3times);
10223         ESTAT_ADD(tx_collide_4times);
10224         ESTAT_ADD(tx_collide_5times);
10225         ESTAT_ADD(tx_collide_6times);
10226         ESTAT_ADD(tx_collide_7times);
10227         ESTAT_ADD(tx_collide_8times);
10228         ESTAT_ADD(tx_collide_9times);
10229         ESTAT_ADD(tx_collide_10times);
10230         ESTAT_ADD(tx_collide_11times);
10231         ESTAT_ADD(tx_collide_12times);
10232         ESTAT_ADD(tx_collide_13times);
10233         ESTAT_ADD(tx_collide_14times);
10234         ESTAT_ADD(tx_collide_15times);
10235         ESTAT_ADD(tx_ucast_packets);
10236         ESTAT_ADD(tx_mcast_packets);
10237         ESTAT_ADD(tx_bcast_packets);
10238         ESTAT_ADD(tx_carrier_sense_errors);
10239         ESTAT_ADD(tx_discards);
10240         ESTAT_ADD(tx_errors);
10241
10242         ESTAT_ADD(dma_writeq_full);
10243         ESTAT_ADD(dma_write_prioq_full);
10244         ESTAT_ADD(rxbds_empty);
10245         ESTAT_ADD(rx_discards);
10246         ESTAT_ADD(rx_errors);
10247         ESTAT_ADD(rx_threshold_hit);
10248
10249         ESTAT_ADD(dma_readq_full);
10250         ESTAT_ADD(dma_read_prioq_full);
10251         ESTAT_ADD(tx_comp_queue_full);
10252
10253         ESTAT_ADD(ring_set_send_prod_index);
10254         ESTAT_ADD(ring_status_update);
10255         ESTAT_ADD(nic_irqs);
10256         ESTAT_ADD(nic_avoided_irqs);
10257         ESTAT_ADD(nic_tx_threshold_hit);
10258
10259         ESTAT_ADD(mbuf_lwm_thresh_hit);
10260 }
10261
10262 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10263 {
10264         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10265         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10266
10267         stats->rx_packets = old_stats->rx_packets +
10268                 get_stat64(&hw_stats->rx_ucast_packets) +
10269                 get_stat64(&hw_stats->rx_mcast_packets) +
10270                 get_stat64(&hw_stats->rx_bcast_packets);
10271
10272         stats->tx_packets = old_stats->tx_packets +
10273                 get_stat64(&hw_stats->tx_ucast_packets) +
10274                 get_stat64(&hw_stats->tx_mcast_packets) +
10275                 get_stat64(&hw_stats->tx_bcast_packets);
10276
10277         stats->rx_bytes = old_stats->rx_bytes +
10278                 get_stat64(&hw_stats->rx_octets);
10279         stats->tx_bytes = old_stats->tx_bytes +
10280                 get_stat64(&hw_stats->tx_octets);
10281
10282         stats->rx_errors = old_stats->rx_errors +
10283                 get_stat64(&hw_stats->rx_errors);
10284         stats->tx_errors = old_stats->tx_errors +
10285                 get_stat64(&hw_stats->tx_errors) +
10286                 get_stat64(&hw_stats->tx_mac_errors) +
10287                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10288                 get_stat64(&hw_stats->tx_discards);
10289
10290         stats->multicast = old_stats->multicast +
10291                 get_stat64(&hw_stats->rx_mcast_packets);
10292         stats->collisions = old_stats->collisions +
10293                 get_stat64(&hw_stats->tx_collisions);
10294
10295         stats->rx_length_errors = old_stats->rx_length_errors +
10296                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10297                 get_stat64(&hw_stats->rx_undersize_packets);
10298
10299         stats->rx_over_errors = old_stats->rx_over_errors +
10300                 get_stat64(&hw_stats->rxbds_empty);
10301         stats->rx_frame_errors = old_stats->rx_frame_errors +
10302                 get_stat64(&hw_stats->rx_align_errors);
10303         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10304                 get_stat64(&hw_stats->tx_discards);
10305         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10306                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10307
10308         stats->rx_crc_errors = old_stats->rx_crc_errors +
10309                 tg3_calc_crc_errors(tp);
10310
10311         stats->rx_missed_errors = old_stats->rx_missed_errors +
10312                 get_stat64(&hw_stats->rx_discards);
10313
10314         stats->rx_dropped = tp->rx_dropped;
10315         stats->tx_dropped = tp->tx_dropped;
10316 }
10317
10318 static int tg3_get_regs_len(struct net_device *dev)
10319 {
10320         return TG3_REG_BLK_SIZE;
10321 }
10322
10323 static void tg3_get_regs(struct net_device *dev,
10324                 struct ethtool_regs *regs, void *_p)
10325 {
10326         struct tg3 *tp = netdev_priv(dev);
10327
10328         regs->version = 0;
10329
10330         memset(_p, 0, TG3_REG_BLK_SIZE);
10331
10332         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10333                 return;
10334
10335         tg3_full_lock(tp, 0);
10336
10337         tg3_dump_legacy_regs(tp, (u32 *)_p);
10338
10339         tg3_full_unlock(tp);
10340 }
10341
10342 static int tg3_get_eeprom_len(struct net_device *dev)
10343 {
10344         struct tg3 *tp = netdev_priv(dev);
10345
10346         return tp->nvram_size;
10347 }
10348
10349 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10350 {
10351         struct tg3 *tp = netdev_priv(dev);
10352         int ret;
10353         u8  *pd;
10354         u32 i, offset, len, b_offset, b_count;
10355         __be32 val;
10356
10357         if (tg3_flag(tp, NO_NVRAM))
10358                 return -EINVAL;
10359
10360         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10361                 return -EAGAIN;
10362
10363         offset = eeprom->offset;
10364         len = eeprom->len;
10365         eeprom->len = 0;
10366
10367         eeprom->magic = TG3_EEPROM_MAGIC;
10368
10369         if (offset & 3) {
10370                 /* adjustments to start on required 4 byte boundary */
10371                 b_offset = offset & 3;
10372                 b_count = 4 - b_offset;
10373                 if (b_count > len) {
10374                         /* i.e. offset=1 len=2 */
10375                         b_count = len;
10376                 }
10377                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10378                 if (ret)
10379                         return ret;
10380                 memcpy(data, ((char *)&val) + b_offset, b_count);
10381                 len -= b_count;
10382                 offset += b_count;
10383                 eeprom->len += b_count;
10384         }
10385
10386         /* read bytes up to the last 4 byte boundary */
10387         pd = &data[eeprom->len];
10388         for (i = 0; i < (len - (len & 3)); i += 4) {
10389                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10390                 if (ret) {
10391                         eeprom->len += i;
10392                         return ret;
10393                 }
10394                 memcpy(pd + i, &val, 4);
10395         }
10396         eeprom->len += i;
10397
10398         if (len & 3) {
10399                 /* read last bytes not ending on 4 byte boundary */
10400                 pd = &data[eeprom->len];
10401                 b_count = len & 3;
10402                 b_offset = offset + len - b_count;
10403                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10404                 if (ret)
10405                         return ret;
10406                 memcpy(pd, &val, b_count);
10407                 eeprom->len += b_count;
10408         }
10409         return 0;
10410 }
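/* Worked example for the alignment handling above (assumed offset = 5,
 * len = 10, i.e. bytes 5..14): the head reads the word at offset 4 and
 * copies bytes 5..7 (b_offset = 1, b_count = 3), the aligned loop reads
 * the word at offset 8, and the tail reads the word at offset 12 and
 * copies bytes 12..14 -- 3 + 4 + 3 = 10 bytes in total.
 */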
10411
10412 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10413 {
10414         struct tg3 *tp = netdev_priv(dev);
10415         int ret;
10416         u32 offset, len, b_offset, odd_len;
10417         u8 *buf;
10418         __be32 start, end;
10419
10420         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10421                 return -EAGAIN;
10422
10423         if (tg3_flag(tp, NO_NVRAM) ||
10424             eeprom->magic != TG3_EEPROM_MAGIC)
10425                 return -EINVAL;
10426
10427         offset = eeprom->offset;
10428         len = eeprom->len;
10429
10430         if ((b_offset = (offset & 3))) {
10431                 /* adjustments to start on required 4 byte boundary */
10432                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10433                 if (ret)
10434                         return ret;
10435                 len += b_offset;
10436                 offset &= ~3;
10437                 if (len < 4)
10438                         len = 4;
10439         }
10440
10441         odd_len = 0;
10442         if (len & 3) {
10443                 /* adjustments to end on required 4 byte boundary */
10444                 odd_len = 1;
10445                 len = (len + 3) & ~3;
10446                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10447                 if (ret)
10448                         return ret;
10449         }
10450
10451         buf = data;
10452         if (b_offset || odd_len) {
10453                 buf = kmalloc(len, GFP_KERNEL);
10454                 if (!buf)
10455                         return -ENOMEM;
10456                 if (b_offset)
10457                         memcpy(buf, &start, 4);
10458                 if (odd_len)
10459                         memcpy(buf+len-4, &end, 4);
10460                 memcpy(buf + b_offset, data, eeprom->len);
10461         }
10462
10463         ret = tg3_nvram_write_block(tp, offset, len, buf);
10464
10465         if (buf != data)
10466                 kfree(buf);
10467
10468         return ret;
10469 }
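/* Worked example for the read-modify-write above (assumed offset = 2,
 * len = 3, i.e. bytes 2..4): the word at 0 is read into 'start' and the
 * word at 4 into 'end'; an 8-byte bounce buffer is assembled as
 *
 *	start[0..1] | data[0..2] | end[1..3]
 *
 * and the whole aligned span is written back with tg3_nvram_write_block().
 */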
10470
10471 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10472 {
10473         struct tg3 *tp = netdev_priv(dev);
10474
10475         if (tg3_flag(tp, USE_PHYLIB)) {
10476                 struct phy_device *phydev;
10477                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10478                         return -EAGAIN;
10479                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10480                 return phy_ethtool_gset(phydev, cmd);
10481         }
10482
10483         cmd->supported = (SUPPORTED_Autoneg);
10484
10485         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10486                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10487                                    SUPPORTED_1000baseT_Full);
10488
10489         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10490                 cmd->supported |= (SUPPORTED_100baseT_Half |
10491                                   SUPPORTED_100baseT_Full |
10492                                   SUPPORTED_10baseT_Half |
10493                                   SUPPORTED_10baseT_Full |
10494                                   SUPPORTED_TP);
10495                 cmd->port = PORT_TP;
10496         } else {
10497                 cmd->supported |= SUPPORTED_FIBRE;
10498                 cmd->port = PORT_FIBRE;
10499         }
10500
10501         cmd->advertising = tp->link_config.advertising;
10502         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10503                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10504                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10505                                 cmd->advertising |= ADVERTISED_Pause;
10506                         } else {
10507                                 cmd->advertising |= ADVERTISED_Pause |
10508                                                     ADVERTISED_Asym_Pause;
10509                         }
10510                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10511                         cmd->advertising |= ADVERTISED_Asym_Pause;
10512                 }
10513         }
10514         if (netif_running(dev) && netif_carrier_ok(dev)) {
10515                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10516                 cmd->duplex = tp->link_config.active_duplex;
10517                 cmd->lp_advertising = tp->link_config.rmt_adv;
10518                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10519                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10520                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10521                         else
10522                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10523                 }
10524         } else {
10525                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10526                 cmd->duplex = DUPLEX_UNKNOWN;
10527                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10528         }
10529         cmd->phy_address = tp->phy_addr;
10530         cmd->transceiver = XCVR_INTERNAL;
10531         cmd->autoneg = tp->link_config.autoneg;
10532         cmd->maxtxpkt = 0;
10533         cmd->maxrxpkt = 0;
10534         return 0;
10535 }
10536
10537 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10538 {
10539         struct tg3 *tp = netdev_priv(dev);
10540         u32 speed = ethtool_cmd_speed(cmd);
10541
10542         if (tg3_flag(tp, USE_PHYLIB)) {
10543                 struct phy_device *phydev;
10544                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10545                         return -EAGAIN;
10546                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10547                 return phy_ethtool_sset(phydev, cmd);
10548         }
10549
10550         if (cmd->autoneg != AUTONEG_ENABLE &&
10551             cmd->autoneg != AUTONEG_DISABLE)
10552                 return -EINVAL;
10553
10554         if (cmd->autoneg == AUTONEG_DISABLE &&
10555             cmd->duplex != DUPLEX_FULL &&
10556             cmd->duplex != DUPLEX_HALF)
10557                 return -EINVAL;
10558
10559         if (cmd->autoneg == AUTONEG_ENABLE) {
10560                 u32 mask = ADVERTISED_Autoneg |
10561                            ADVERTISED_Pause |
10562                            ADVERTISED_Asym_Pause;
10563
10564                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10565                         mask |= ADVERTISED_1000baseT_Half |
10566                                 ADVERTISED_1000baseT_Full;
10567
10568                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10569                         mask |= ADVERTISED_100baseT_Half |
10570                                 ADVERTISED_100baseT_Full |
10571                                 ADVERTISED_10baseT_Half |
10572                                 ADVERTISED_10baseT_Full |
10573                                 ADVERTISED_TP;
10574                 else
10575                         mask |= ADVERTISED_FIBRE;
10576
10577                 if (cmd->advertising & ~mask)
10578                         return -EINVAL;
10579
10580                 mask &= (ADVERTISED_1000baseT_Half |
10581                          ADVERTISED_1000baseT_Full |
10582                          ADVERTISED_100baseT_Half |
10583                          ADVERTISED_100baseT_Full |
10584                          ADVERTISED_10baseT_Half |
10585                          ADVERTISED_10baseT_Full);
10586
10587                 cmd->advertising &= mask;
10588         } else {
10589                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10590                         if (speed != SPEED_1000)
10591                                 return -EINVAL;
10592
10593                         if (cmd->duplex != DUPLEX_FULL)
10594                                 return -EINVAL;
10595                 } else {
10596                         if (speed != SPEED_100 &&
10597                             speed != SPEED_10)
10598                                 return -EINVAL;
10599                 }
10600         }
10601
10602         tg3_full_lock(tp, 0);
10603
10604         tp->link_config.autoneg = cmd->autoneg;
10605         if (cmd->autoneg == AUTONEG_ENABLE) {
10606                 tp->link_config.advertising = (cmd->advertising |
10607                                               ADVERTISED_Autoneg);
10608                 tp->link_config.speed = SPEED_UNKNOWN;
10609                 tp->link_config.duplex = DUPLEX_UNKNOWN;
10610         } else {
10611                 tp->link_config.advertising = 0;
10612                 tp->link_config.speed = speed;
10613                 tp->link_config.duplex = cmd->duplex;
10614         }
10615
10616         if (netif_running(dev))
10617                 tg3_setup_phy(tp, 1);
10618
10619         tg3_full_unlock(tp);
10620
10621         return 0;
10622 }
10623
10624 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10625 {
10626         struct tg3 *tp = netdev_priv(dev);
10627
10628         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10629         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10630         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10631         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10632 }
10633
10634 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10635 {
10636         struct tg3 *tp = netdev_priv(dev);
10637
10638         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10639                 wol->supported = WAKE_MAGIC;
10640         else
10641                 wol->supported = 0;
10642         wol->wolopts = 0;
10643         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10644                 wol->wolopts = WAKE_MAGIC;
10645         memset(&wol->sopass, 0, sizeof(wol->sopass));
10646 }
10647
10648 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10649 {
10650         struct tg3 *tp = netdev_priv(dev);
10651         struct device *dp = &tp->pdev->dev;
10652
10653         if (wol->wolopts & ~WAKE_MAGIC)
10654                 return -EINVAL;
10655         if ((wol->wolopts & WAKE_MAGIC) &&
10656             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10657                 return -EINVAL;
10658
10659         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10660
10661         spin_lock_bh(&tp->lock);
10662         if (device_may_wakeup(dp))
10663                 tg3_flag_set(tp, WOL_ENABLE);
10664         else
10665                 tg3_flag_clear(tp, WOL_ENABLE);
10666         spin_unlock_bh(&tp->lock);
10667
10668         return 0;
10669 }
10670
10671 static u32 tg3_get_msglevel(struct net_device *dev)
10672 {
10673         struct tg3 *tp = netdev_priv(dev);
10674         return tp->msg_enable;
10675 }
10676
10677 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10678 {
10679         struct tg3 *tp = netdev_priv(dev);
10680         tp->msg_enable = value;
10681 }
10682
10683 static int tg3_nway_reset(struct net_device *dev)
10684 {
10685         struct tg3 *tp = netdev_priv(dev);
10686         int r;
10687
10688         if (!netif_running(dev))
10689                 return -EAGAIN;
10690
10691         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10692                 return -EINVAL;
10693
10694         if (tg3_flag(tp, USE_PHYLIB)) {
10695                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10696                         return -EAGAIN;
10697                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10698         } else {
10699                 u32 bmcr;
10700
10701                 spin_lock_bh(&tp->lock);
10702                 r = -EINVAL;
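                /* Read BMCR twice and act on the second value; the
                 * first read is discarded (assumed intent: flush a
                 * stale value from the PHY before testing ANENABLE).
                 */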
10703                 tg3_readphy(tp, MII_BMCR, &bmcr);
10704                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10705                     ((bmcr & BMCR_ANENABLE) ||
10706                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10707                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10708                                                    BMCR_ANENABLE);
10709                         r = 0;
10710                 }
10711                 spin_unlock_bh(&tp->lock);
10712         }
10713
10714         return r;
10715 }
10716
10717 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10718 {
10719         struct tg3 *tp = netdev_priv(dev);
10720
10721         ering->rx_max_pending = tp->rx_std_ring_mask;
10722         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10723                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10724         else
10725                 ering->rx_jumbo_max_pending = 0;
10726
10727         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10728
10729         ering->rx_pending = tp->rx_pending;
10730         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10731                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10732         else
10733                 ering->rx_jumbo_pending = 0;
10734
10735         ering->tx_pending = tp->napi[0].tx_pending;
10736 }
10737
10738 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10739 {
10740         struct tg3 *tp = netdev_priv(dev);
10741         int i, irq_sync = 0, err = 0;
10742
10743         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10744             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10745             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10746             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10747             (tg3_flag(tp, TSO_BUG) &&
10748              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10749                 return -EINVAL;
10750
10751         if (netif_running(dev)) {
10752                 tg3_phy_stop(tp);
10753                 tg3_netif_stop(tp);
10754                 irq_sync = 1;
10755         }
10756
10757         tg3_full_lock(tp, irq_sync);
10758
10759         tp->rx_pending = ering->rx_pending;
10760
10761         if (tg3_flag(tp, MAX_RXPEND_64) &&
10762             tp->rx_pending > 63)
10763                 tp->rx_pending = 63;
10764         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10765
10766         for (i = 0; i < tp->irq_max; i++)
10767                 tp->napi[i].tx_pending = ering->tx_pending;
10768
10769         if (netif_running(dev)) {
10770                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10771                 err = tg3_restart_hw(tp, 1);
10772                 if (!err)
10773                         tg3_netif_start(tp);
10774         }
10775
10776         tg3_full_unlock(tp);
10777
10778         if (irq_sync && !err)
10779                 tg3_phy_start(tp);
10780
10781         return err;
10782 }
10783
10784 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10785 {
10786         struct tg3 *tp = netdev_priv(dev);
10787
10788         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10789
10790         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10791                 epause->rx_pause = 1;
10792         else
10793                 epause->rx_pause = 0;
10794
10795         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10796                 epause->tx_pause = 1;
10797         else
10798                 epause->tx_pause = 0;
10799 }
10800
10801 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10802 {
10803         struct tg3 *tp = netdev_priv(dev);
10804         int err = 0;
10805
10806         if (tg3_flag(tp, USE_PHYLIB)) {
10807                 u32 newadv;
10808                 struct phy_device *phydev;
10809
10810                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10811
10812                 if (!(phydev->supported & SUPPORTED_Pause) ||
10813                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10814                      (epause->rx_pause != epause->tx_pause)))
10815                         return -EINVAL;
10816
10817                 tp->link_config.flowctrl = 0;
10818                 if (epause->rx_pause) {
10819                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10820
10821                         if (epause->tx_pause) {
10822                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10823                                 newadv = ADVERTISED_Pause;
10824                         } else
10825                                 newadv = ADVERTISED_Pause |
10826                                          ADVERTISED_Asym_Pause;
10827                 } else if (epause->tx_pause) {
10828                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10829                         newadv = ADVERTISED_Asym_Pause;
10830                 } else
10831                         newadv = 0;
10832
10833                 if (epause->autoneg)
10834                         tg3_flag_set(tp, PAUSE_AUTONEG);
10835                 else
10836                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10837
10838                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10839                         u32 oldadv = phydev->advertising &
10840                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10841                         if (oldadv != newadv) {
10842                                 phydev->advertising &=
10843                                         ~(ADVERTISED_Pause |
10844                                           ADVERTISED_Asym_Pause);
10845                                 phydev->advertising |= newadv;
10846                                 if (phydev->autoneg) {
10847                                         /*
10848                                          * Always renegotiate the link to
10849                                          * inform our link partner of our
10850                                          * flow control settings, even if the
10851                                          * flow control is forced.  Let
10852                                          * tg3_adjust_link() do the final
10853                                          * flow control setup.
10854                                          */
10855                                         return phy_start_aneg(phydev);
10856                                 }
10857                         }
10858
10859                         if (!epause->autoneg)
10860                                 tg3_setup_flow_control(tp, 0, 0);
10861                 } else {
10862                         tp->link_config.advertising &=
10863                                         ~(ADVERTISED_Pause |
10864                                           ADVERTISED_Asym_Pause);
10865                         tp->link_config.advertising |= newadv;
10866                 }
10867         } else {
10868                 int irq_sync = 0;
10869
10870                 if (netif_running(dev)) {
10871                         tg3_netif_stop(tp);
10872                         irq_sync = 1;
10873                 }
10874
10875                 tg3_full_lock(tp, irq_sync);
10876
10877                 if (epause->autoneg)
10878                         tg3_flag_set(tp, PAUSE_AUTONEG);
10879                 else
10880                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10881                 if (epause->rx_pause)
10882                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10883                 else
10884                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10885                 if (epause->tx_pause)
10886                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10887                 else
10888                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10889
10890                 if (netif_running(dev)) {
10891                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10892                         err = tg3_restart_hw(tp, 1);
10893                         if (!err)
10894                                 tg3_netif_start(tp);
10895                 }
10896
10897                 tg3_full_unlock(tp);
10898         }
10899
10900         return err;
10901 }
10902
10903 static int tg3_get_sset_count(struct net_device *dev, int sset)
10904 {
10905         switch (sset) {
10906         case ETH_SS_TEST:
10907                 return TG3_NUM_TEST;
10908         case ETH_SS_STATS:
10909                 return TG3_NUM_STATS;
10910         default:
10911                 return -EOPNOTSUPP;
10912         }
10913 }
10914
10915 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10916                          u32 *rules __always_unused)
10917 {
10918         struct tg3 *tp = netdev_priv(dev);
10919
10920         if (!tg3_flag(tp, SUPPORT_MSIX))
10921                 return -EOPNOTSUPP;
10922
10923         switch (info->cmd) {
10924         case ETHTOOL_GRXRINGS:
10925                 if (netif_running(tp->dev))
10926                         info->data = tp->irq_cnt;
10927                 else {
10928                         info->data = num_online_cpus();
10929                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
10930                                 info->data = TG3_IRQ_MAX_VECS_RSS;
10931                 }
10932
10933                 /* The first interrupt vector only
10934                  * handles link interrupts.
10935                  */
10936                 info->data -= 1;
10937                 return 0;
10938
10939         default:
10940                 return -EOPNOTSUPP;
10941         }
10942 }
10943
10944 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10945 {
10946         u32 size = 0;
10947         struct tg3 *tp = netdev_priv(dev);
10948
10949         if (tg3_flag(tp, SUPPORT_MSIX))
10950                 size = TG3_RSS_INDIR_TBL_SIZE;
10951
10952         return size;
10953 }
10954
10955 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10956 {
10957         struct tg3 *tp = netdev_priv(dev);
10958         int i;
10959
10960         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10961                 indir[i] = tp->rss_ind_tbl[i];
10962
10963         return 0;
10964 }
10965
10966 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10967 {
10968         struct tg3 *tp = netdev_priv(dev);
10969         size_t i;
10970
10971         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10972                 tp->rss_ind_tbl[i] = indir[i];
10973
10974         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10975                 return 0;
10976
10977         /* It is legal to write the indirection
10978          * table while the device is running.
10979          */
10980         tg3_full_lock(tp, 0);
10981         tg3_rss_write_indir_tbl(tp);
10982         tg3_full_unlock(tp);
10983
10984         return 0;
10985 }
10986
10987 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10988 {
10989         switch (stringset) {
10990         case ETH_SS_STATS:
10991                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10992                 break;
10993         case ETH_SS_TEST:
10994                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10995                 break;
10996         default:
10997                 WARN_ON(1);     /* we need a WARN() */
10998                 break;
10999         }
11000 }
11001
11002 static int tg3_set_phys_id(struct net_device *dev,
11003                             enum ethtool_phys_id_state state)
11004 {
11005         struct tg3 *tp = netdev_priv(dev);
11006
11007         if (!netif_running(tp->dev))
11008                 return -EAGAIN;
11009
11010         switch (state) {
11011         case ETHTOOL_ID_ACTIVE:
11012                 return 1;       /* cycle on/off once per second */
11013
11014         case ETHTOOL_ID_ON:
11015                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11016                      LED_CTRL_1000MBPS_ON |
11017                      LED_CTRL_100MBPS_ON |
11018                      LED_CTRL_10MBPS_ON |
11019                      LED_CTRL_TRAFFIC_OVERRIDE |
11020                      LED_CTRL_TRAFFIC_BLINK |
11021                      LED_CTRL_TRAFFIC_LED);
11022                 break;
11023
11024         case ETHTOOL_ID_OFF:
11025                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11026                      LED_CTRL_TRAFFIC_OVERRIDE);
11027                 break;
11028
11029         case ETHTOOL_ID_INACTIVE:
11030                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11031                 break;
11032         }
11033
11034         return 0;
11035 }
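/* Userspace trigger for the handler above (interface name assumed):
 *
 *	ethtool -p eth0 5
 *
 * ethtool issues ETHTOOL_ID_ACTIVE (the return value 1 requests one
 * on/off cycle per second), alternates ETHTOOL_ID_ON/OFF for the
 * requested 5 seconds, and ends with ETHTOOL_ID_INACTIVE, which restores
 * tp->led_ctrl.
 */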
11036
11037 static void tg3_get_ethtool_stats(struct net_device *dev,
11038                                    struct ethtool_stats *estats, u64 *tmp_stats)
11039 {
11040         struct tg3 *tp = netdev_priv(dev);
11041
11042         if (tp->hw_stats)
11043                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11044         else
11045                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11046 }
11047
11048 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11049 {
11050         int i;
11051         __be32 *buf;
11052         u32 offset = 0, len = 0;
11053         u32 magic, val;
11054
11055         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11056                 return NULL;
11057
11058         if (magic == TG3_EEPROM_MAGIC) {
11059                 for (offset = TG3_NVM_DIR_START;
11060                      offset < TG3_NVM_DIR_END;
11061                      offset += TG3_NVM_DIRENT_SIZE) {
11062                         if (tg3_nvram_read(tp, offset, &val))
11063                                 return NULL;
11064
11065                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11066                             TG3_NVM_DIRTYPE_EXTVPD)
11067                                 break;
11068                 }
11069
11070                 if (offset != TG3_NVM_DIR_END) {
11071                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11072                         if (tg3_nvram_read(tp, offset + 4, &offset))
11073                                 return NULL;
11074
11075                         offset = tg3_nvram_logical_addr(tp, offset);
11076                 }
11077         }
11078
11079         if (!offset || !len) {
11080                 offset = TG3_NVM_VPD_OFF;
11081                 len = TG3_NVM_VPD_LEN;
11082         }
11083
11084         buf = kmalloc(len, GFP_KERNEL);
11085         if (buf == NULL)
11086                 return NULL;
11087
11088         if (magic == TG3_EEPROM_MAGIC) {
11089                 for (i = 0; i < len; i += 4) {
11090                         /* The data is in little-endian format in NVRAM.
11091                          * Use the big-endian read routines to preserve
11092                          * the byte order as it exists in NVRAM.
11093                          */
11094                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11095                                 goto error;
11096                 }
11097         } else {
11098                 u8 *ptr;
11099                 ssize_t cnt;
11100                 unsigned int pos = 0;
11101
11102                 ptr = (u8 *)&buf[0];
11103                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11104                         cnt = pci_read_vpd(tp->pdev, pos,
11105                                            len - pos, ptr);
11106                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11107                                 cnt = 0;
11108                         else if (cnt < 0)
11109                                 goto error;
11110                 }
11111                 if (pos != len)
11112                         goto error;
11113         }
11114
11115         *vpdlen = len;
11116
11117         return buf;
11118
11119 error:
11120         kfree(buf);
11121         return NULL;
11122 }
11123
11124 #define NVRAM_TEST_SIZE 0x100
11125 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11126 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11127 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11128 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11129 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11130 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11131 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11132 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11133
11134 static int tg3_test_nvram(struct tg3 *tp)
11135 {
11136         u32 csum, magic, len;
11137         __be32 *buf;
11138         int i, j, k, err = 0, size;
11139
11140         if (tg3_flag(tp, NO_NVRAM))
11141                 return 0;
11142
11143         if (tg3_nvram_read(tp, 0, &magic) != 0)
11144                 return -EIO;
11145
11146         if (magic == TG3_EEPROM_MAGIC)
11147                 size = NVRAM_TEST_SIZE;
11148         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11149                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11150                     TG3_EEPROM_SB_FORMAT_1) {
11151                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11152                         case TG3_EEPROM_SB_REVISION_0:
11153                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11154                                 break;
11155                         case TG3_EEPROM_SB_REVISION_2:
11156                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11157                                 break;
11158                         case TG3_EEPROM_SB_REVISION_3:
11159                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11160                                 break;
11161                         case TG3_EEPROM_SB_REVISION_4:
11162                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11163                                 break;
11164                         case TG3_EEPROM_SB_REVISION_5:
11165                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11166                                 break;
11167                         case TG3_EEPROM_SB_REVISION_6:
11168                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11169                                 break;
11170                         default:
11171                                 return -EIO;
11172                         }
11173                 } else
11174                         return 0;
11175         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11176                 size = NVRAM_SELFBOOT_HW_SIZE;
11177         else
11178                 return -EIO;
11179
11180         buf = kmalloc(size, GFP_KERNEL);
11181         if (buf == NULL)
11182                 return -ENOMEM;
11183
11184         err = -EIO;
11185         for (i = 0, j = 0; i < size; i += 4, j++) {
11186                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11187                 if (err)
11188                         break;
11189         }
11190         if (i < size)
11191                 goto out;
11192
11193         /* Selfboot format */
11194         magic = be32_to_cpu(buf[0]);
11195         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11196             TG3_EEPROM_MAGIC_FW) {
11197                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11198
11199                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11200                     TG3_EEPROM_SB_REVISION_2) {
11201                         /* For rev 2, the csum doesn't include the MBA. */
11202                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11203                                 csum8 += buf8[i];
11204                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11205                                 csum8 += buf8[i];
11206                 } else {
11207                         for (i = 0; i < size; i++)
11208                                 csum8 += buf8[i];
11209                 }
11210
11211                 if (csum8 == 0) {
11212                         err = 0;
11213                         goto out;
11214                 }
11215
11216                 err = -EIO;
11217                 goto out;
11218         }
11219
11220         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11221             TG3_EEPROM_MAGIC_HW) {
11222                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11223                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11224                 u8 *buf8 = (u8 *) buf;
11225
11226                 /* Separate the parity bits and the data bytes.  */
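                /* Layout, as implied by the loop below: in the 0x20-byte
                 * image, bytes 0 and 8 each hold 7 parity bits and bytes
                 * 16-17 hold 6 + 8 more, giving 28 parity bits -- one for
                 * each of the 28 remaining data bytes
                 * (NVRAM_SELFBOOT_DATA_SIZE == 0x1c).
                 */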
11227                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11228                         if ((i == 0) || (i == 8)) {
11229                                 int l;
11230                                 u8 msk;
11231
11232                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11233                                         parity[k++] = buf8[i] & msk;
11234                                 i++;
11235                         } else if (i == 16) {
11236                                 int l;
11237                                 u8 msk;
11238
11239                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11240                                         parity[k++] = buf8[i] & msk;
11241                                 i++;
11242
11243                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11244                                         parity[k++] = buf8[i] & msk;
11245                                 i++;
11246                         }
11247                         data[j++] = buf8[i];
11248                 }
11249
11250                 err = -EIO;
11251                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11252                         u8 hw8 = hweight8(data[i]);
11253
11254                         if ((hw8 & 0x1) && parity[i])
11255                                 goto out;
11256                         else if (!(hw8 & 0x1) && !parity[i])
11257                                 goto out;
11258                 }
11259                 err = 0;
11260                 goto out;
11261         }
11262
11263         err = -EIO;
11264
11265         /* Bootstrap checksum at offset 0x10 */
11266         csum = calc_crc((unsigned char *) buf, 0x10);
11267         if (csum != le32_to_cpu(buf[0x10/4]))
11268                 goto out;
11269
11270         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11271         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11272         if (csum != le32_to_cpu(buf[0xfc/4]))
11273                 goto out;
11274
11275         kfree(buf);
11276
11277         buf = tg3_vpd_readblock(tp, &len);
11278         if (!buf)
11279                 return -ENOMEM;
11280
11281         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11282         if (i > 0) {
11283                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11284                 if (j < 0)
11285                         goto out;
11286
11287                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11288                         goto out;
11289
11290                 i += PCI_VPD_LRDT_TAG_SIZE;
11291                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11292                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11293                 if (j > 0) {
11294                         u8 csum8 = 0;
11295
11296                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11297
11298                         for (i = 0; i <= j; i++)
11299                                 csum8 += ((u8 *)buf)[i];
11300
11301                         if (csum8)
11302                                 goto out;
11303                 }
11304         }
11305
11306         err = 0;
11307
11308 out:
11309         kfree(buf);
11310         return err;
11311 }
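
/* Illustrative sketch, not part of the driver: the selfboot images
 * validated above use a simple 8-bit rolling checksum -- all bytes of
 * a valid image, including the stored checksum byte, sum to zero
 * modulo 256 (format 1 rev 2 additionally skips the 4-byte MBA word,
 * as the loop above shows).  The helper name is hypothetical.
 */
static u8 example_selfboot_csum(const u8 *buf, int len)
{
        u8 csum = 0;
        int i;

        for (i = 0; i < len; i++)
                csum += buf[i];

        return csum;    /* zero for a valid image */
}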
11312
11313 #define TG3_SERDES_TIMEOUT_SEC  2
11314 #define TG3_COPPER_TIMEOUT_SEC  6
11315
11316 static int tg3_test_link(struct tg3 *tp)
11317 {
11318         int i, max;
11319
11320         if (!netif_running(tp->dev))
11321                 return -ENODEV;
11322
11323         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11324                 max = TG3_SERDES_TIMEOUT_SEC;
11325         else
11326                 max = TG3_COPPER_TIMEOUT_SEC;
11327
11328         for (i = 0; i < max; i++) {
11329                 if (netif_carrier_ok(tp->dev))
11330                         return 0;
11331
11332                 if (msleep_interruptible(1000))
11333                         break;
11334         }
11335
11336         return -EIO;
11337 }
11338
11339 /* Only test the commonly used registers */
11340 static int tg3_test_registers(struct tg3 *tp)
11341 {
11342         int i, is_5705, is_5750;
11343         u32 offset, read_mask, write_mask, val, save_val, read_val;
11344         static struct {
11345                 u16 offset;
11346                 u16 flags;
11347 #define TG3_FL_5705     0x1
11348 #define TG3_FL_NOT_5705 0x2
11349 #define TG3_FL_NOT_5788 0x4
11350 #define TG3_FL_NOT_5750 0x8
11351                 u32 read_mask;
11352                 u32 write_mask;
11353         } reg_tbl[] = {
11354                 /* MAC Control Registers */
11355                 { MAC_MODE, TG3_FL_NOT_5705,
11356                         0x00000000, 0x00ef6f8c },
11357                 { MAC_MODE, TG3_FL_5705,
11358                         0x00000000, 0x01ef6b8c },
11359                 { MAC_STATUS, TG3_FL_NOT_5705,
11360                         0x03800107, 0x00000000 },
11361                 { MAC_STATUS, TG3_FL_5705,
11362                         0x03800100, 0x00000000 },
11363                 { MAC_ADDR_0_HIGH, 0x0000,
11364                         0x00000000, 0x0000ffff },
11365                 { MAC_ADDR_0_LOW, 0x0000,
11366                         0x00000000, 0xffffffff },
11367                 { MAC_RX_MTU_SIZE, 0x0000,
11368                         0x00000000, 0x0000ffff },
11369                 { MAC_TX_MODE, 0x0000,
11370                         0x00000000, 0x00000070 },
11371                 { MAC_TX_LENGTHS, 0x0000,
11372                         0x00000000, 0x00003fff },
11373                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11374                         0x00000000, 0x000007fc },
11375                 { MAC_RX_MODE, TG3_FL_5705,
11376                         0x00000000, 0x000007dc },
11377                 { MAC_HASH_REG_0, 0x0000,
11378                         0x00000000, 0xffffffff },
11379                 { MAC_HASH_REG_1, 0x0000,
11380                         0x00000000, 0xffffffff },
11381                 { MAC_HASH_REG_2, 0x0000,
11382                         0x00000000, 0xffffffff },
11383                 { MAC_HASH_REG_3, 0x0000,
11384                         0x00000000, 0xffffffff },
11385
11386                 /* Receive Data and Receive BD Initiator Control Registers. */
11387                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11388                         0x00000000, 0xffffffff },
11389                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11390                         0x00000000, 0xffffffff },
11391                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11392                         0x00000000, 0x00000003 },
11393                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11394                         0x00000000, 0xffffffff },
11395                 { RCVDBDI_STD_BD+0, 0x0000,
11396                         0x00000000, 0xffffffff },
11397                 { RCVDBDI_STD_BD+4, 0x0000,
11398                         0x00000000, 0xffffffff },
11399                 { RCVDBDI_STD_BD+8, 0x0000,
11400                         0x00000000, 0xffff0002 },
11401                 { RCVDBDI_STD_BD+0xc, 0x0000,
11402                         0x00000000, 0xffffffff },
11403
11404                 /* Receive BD Initiator Control Registers. */
11405                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11406                         0x00000000, 0xffffffff },
11407                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11408                         0x00000000, 0x000003ff },
11409                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11410                         0x00000000, 0xffffffff },
11411
11412                 /* Host Coalescing Control Registers. */
11413                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11414                         0x00000000, 0x00000004 },
11415                 { HOSTCC_MODE, TG3_FL_5705,
11416                         0x00000000, 0x000000f6 },
11417                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11418                         0x00000000, 0xffffffff },
11419                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11420                         0x00000000, 0x000003ff },
11421                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11422                         0x00000000, 0xffffffff },
11423                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11424                         0x00000000, 0x000003ff },
11425                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11426                         0x00000000, 0xffffffff },
11427                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11428                         0x00000000, 0x000000ff },
11429                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11430                         0x00000000, 0xffffffff },
11431                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11432                         0x00000000, 0x000000ff },
11433                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11434                         0x00000000, 0xffffffff },
11435                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11436                         0x00000000, 0xffffffff },
11437                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11438                         0x00000000, 0xffffffff },
11439                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11440                         0x00000000, 0x000000ff },
11441                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11442                         0x00000000, 0xffffffff },
11443                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11444                         0x00000000, 0x000000ff },
11445                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11446                         0x00000000, 0xffffffff },
11447                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11448                         0x00000000, 0xffffffff },
11449                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11450                         0x00000000, 0xffffffff },
11451                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11452                         0x00000000, 0xffffffff },
11453                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11454                         0x00000000, 0xffffffff },
11455                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11456                         0xffffffff, 0x00000000 },
11457                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11458                         0xffffffff, 0x00000000 },
11459
11460                 /* Buffer Manager Control Registers. */
11461                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11462                         0x00000000, 0x007fff80 },
11463                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11464                         0x00000000, 0x007fffff },
11465                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11466                         0x00000000, 0x0000003f },
11467                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11468                         0x00000000, 0x000001ff },
11469                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11470                         0x00000000, 0x000001ff },
11471                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11472                         0xffffffff, 0x00000000 },
11473                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11474                         0xffffffff, 0x00000000 },
11475
11476                 /* Mailbox Registers */
11477                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11478                         0x00000000, 0x000001ff },
11479                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11480                         0x00000000, 0x000001ff },
11481                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11482                         0x00000000, 0x000007ff },
11483                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11484                         0x00000000, 0x000001ff },
11485
11486                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11487         };
11488
11489         is_5705 = is_5750 = 0;
11490         if (tg3_flag(tp, 5705_PLUS)) {
11491                 is_5705 = 1;
11492                 if (tg3_flag(tp, 5750_PLUS))
11493                         is_5750 = 1;
11494         }
11495
11496         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11497                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11498                         continue;
11499
11500                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11501                         continue;
11502
11503                 if (tg3_flag(tp, IS_5788) &&
11504                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11505                         continue;
11506
11507                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11508                         continue;
11509
11510                 offset = (u32) reg_tbl[i].offset;
11511                 read_mask = reg_tbl[i].read_mask;
11512                 write_mask = reg_tbl[i].write_mask;
11513
11514                 /* Save the original register content */
11515                 save_val = tr32(offset);
11516
11517                 /* Determine the read-only value. */
11518                 read_val = save_val & read_mask;
11519
11520                 /* Write zero to the register, then make sure the read-only bits
11521                  * are not changed and the read/write bits are all zeros.
11522                  */
11523                 tw32(offset, 0);
11524
11525                 val = tr32(offset);
11526
11527                 /* Test the read-only and read/write bits. */
11528                 if (((val & read_mask) != read_val) || (val & write_mask))
11529                         goto out;
11530
11531                 /* Write ones to all the bits defined by read_mask and
11532                  * write_mask, then make sure the read-only bits are not
11533                  * changed and the read/write bits are all ones.
11534                  */
11535                 tw32(offset, read_mask | write_mask);
11536
11537                 val = tr32(offset);
11538
11539                 /* Test the read-only bits. */
11540                 if ((val & read_mask) != read_val)
11541                         goto out;
11542
11543                 /* Test the read/write bits. */
11544                 if ((val & write_mask) != write_mask)
11545                         goto out;
11546
11547                 tw32(offset, save_val);
11548         }
11549
11550         return 0;
11551
11552 out:
11553         if (netif_msg_hw(tp))
11554                 netdev_err(tp->dev,
11555                            "Register test failed at offset %x\n", offset);
11556         tw32(offset, save_val);
11557         return -EIO;
11558 }
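
/* Illustrative sketch, not part of the driver: the per-register probe
 * performed by the loop above, factored into a hypothetical helper.
 * read_mask selects the read-only bits, which must survive any write;
 * write_mask selects the read/write bits, which must accept both
 * all-zeros and all-ones.  tr32()/tw32() are the driver's register
 * accessors and expect a local "tp".
 */
static int example_reg_pattern_test(struct tg3 *tp, u32 offset,
                                    u32 read_mask, u32 write_mask)
{
        u32 save_val = tr32(offset);
        u32 read_val = save_val & read_mask;
        u32 val;
        int err = -EIO;

        /* All-zeros: read-only bits keep their value, r/w bits clear. */
        tw32(offset, 0);
        val = tr32(offset);
        if (((val & read_mask) != read_val) || (val & write_mask))
                goto restore;

        /* All-ones: read-only bits keep their value, r/w bits set. */
        tw32(offset, read_mask | write_mask);
        val = tr32(offset);
        if ((val & read_mask) != read_val ||
            (val & write_mask) != write_mask)
                goto restore;

        err = 0;
restore:
        tw32(offset, save_val);
        return err;
}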
11559
11560 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11561 {
11562         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11563         int i;
11564         u32 j;
11565
11566         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11567                 for (j = 0; j < len; j += 4) {
11568                         u32 val;
11569
11570                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11571                         tg3_read_mem(tp, offset + j, &val);
11572                         if (val != test_pattern[i])
11573                                 return -EIO;
11574                 }
11575         }
11576         return 0;
11577 }
11578
11579 static int tg3_test_memory(struct tg3 *tp)
11580 {
11581         static struct mem_entry {
11582                 u32 offset;
11583                 u32 len;
11584         } mem_tbl_570x[] = {
11585                 { 0x00000000, 0x00b50},
11586                 { 0x00002000, 0x1c000},
11587                 { 0xffffffff, 0x00000}
11588         }, mem_tbl_5705[] = {
11589                 { 0x00000100, 0x0000c},
11590                 { 0x00000200, 0x00008},
11591                 { 0x00004000, 0x00800},
11592                 { 0x00006000, 0x01000},
11593                 { 0x00008000, 0x02000},
11594                 { 0x00010000, 0x0e000},
11595                 { 0xffffffff, 0x00000}
11596         }, mem_tbl_5755[] = {
11597                 { 0x00000200, 0x00008},
11598                 { 0x00004000, 0x00800},
11599                 { 0x00006000, 0x00800},
11600                 { 0x00008000, 0x02000},
11601                 { 0x00010000, 0x0c000},
11602                 { 0xffffffff, 0x00000}
11603         }, mem_tbl_5906[] = {
11604                 { 0x00000200, 0x00008},
11605                 { 0x00004000, 0x00400},
11606                 { 0x00006000, 0x00400},
11607                 { 0x00008000, 0x01000},
11608                 { 0x00010000, 0x01000},
11609                 { 0xffffffff, 0x00000}
11610         }, mem_tbl_5717[] = {
11611                 { 0x00000200, 0x00008},
11612                 { 0x00010000, 0x0a000},
11613                 { 0x00020000, 0x13c00},
11614                 { 0xffffffff, 0x00000}
11615         }, mem_tbl_57765[] = {
11616                 { 0x00000200, 0x00008},
11617                 { 0x00004000, 0x00800},
11618                 { 0x00006000, 0x09800},
11619                 { 0x00010000, 0x0a000},
11620                 { 0xffffffff, 0x00000}
11621         };
11622         struct mem_entry *mem_tbl;
11623         int err = 0;
11624         int i;
11625
11626         if (tg3_flag(tp, 5717_PLUS))
11627                 mem_tbl = mem_tbl_5717;
11628         else if (tg3_flag(tp, 57765_CLASS))
11629                 mem_tbl = mem_tbl_57765;
11630         else if (tg3_flag(tp, 5755_PLUS))
11631                 mem_tbl = mem_tbl_5755;
11632         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11633                 mem_tbl = mem_tbl_5906;
11634         else if (tg3_flag(tp, 5705_PLUS))
11635                 mem_tbl = mem_tbl_5705;
11636         else
11637                 mem_tbl = mem_tbl_570x;
11638
11639         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11640                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11641                 if (err)
11642                         break;
11643         }
11644
11645         return err;
11646 }
11647
11648 #define TG3_TSO_MSS             500
11649
11650 #define TG3_TSO_IP_HDR_LEN      20
11651 #define TG3_TSO_TCP_HDR_LEN     20
11652 #define TG3_TSO_TCP_OPT_LEN     12
11653
11654 static const u8 tg3_tso_header[] = {
11655 0x08, 0x00,
11656 0x45, 0x00, 0x00, 0x00,
11657 0x00, 0x00, 0x40, 0x00,
11658 0x40, 0x06, 0x00, 0x00,
11659 0x0a, 0x00, 0x00, 0x01,
11660 0x0a, 0x00, 0x00, 0x02,
11661 0x0d, 0x00, 0xe0, 0x00,
11662 0x00, 0x00, 0x01, 0x00,
11663 0x00, 0x00, 0x02, 0x00,
11664 0x80, 0x10, 0x10, 0x00,
11665 0x14, 0x09, 0x00, 0x00,
11666 0x01, 0x01, 0x08, 0x0a,
11667 0x11, 0x11, 0x11, 0x11,
11668 0x11, 0x11, 0x11, 0x11,
11669 };
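
/* Layout of tg3_tso_header, decoded: bytes 0-1 are the Ethernet type
 * (0x0800, IPv4); the next 20 bytes an IPv4 header (DF set, protocol
 * TCP, 10.0.0.1 -> 10.0.0.2, total length patched in later); the next
 * 20 bytes a TCP header with data offset 8 (i.e. 12 bytes of options)
 * and only ACK set; the final 12 bytes are NOP, NOP and a timestamp
 * option.  Prefixed by the two 6-byte MAC addresses of the test frame,
 * this yields the ETH_HLEN + TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
 * TG3_TSO_TCP_OPT_LEN header length used in tg3_run_loopback().
 */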
11670
11671 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11672 {
11673         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11674         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11675         u32 budget;
11676         struct sk_buff *skb;
11677         u8 *tx_data, *rx_data;
11678         dma_addr_t map;
11679         int num_pkts, tx_len, rx_len, i, err;
11680         struct tg3_rx_buffer_desc *desc;
11681         struct tg3_napi *tnapi, *rnapi;
11682         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11683
11684         tnapi = &tp->napi[0];
11685         rnapi = &tp->napi[0];
11686         if (tp->irq_cnt > 1) {
11687                 if (tg3_flag(tp, ENABLE_RSS))
11688                         rnapi = &tp->napi[1];
11689                 if (tg3_flag(tp, ENABLE_TSS))
11690                         tnapi = &tp->napi[1];
11691         }
11692         coal_now = tnapi->coal_now | rnapi->coal_now;
11693
11694         err = -EIO;
11695
11696         tx_len = pktsz;
11697         skb = netdev_alloc_skb(tp->dev, tx_len);
11698         if (!skb)
11699                 return -ENOMEM;
11700
11701         tx_data = skb_put(skb, tx_len);
11702         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
11703         memset(tx_data + ETH_ALEN, 0x0, 8);
11704
11705         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11706
11707         if (tso_loopback) {
11708                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11709
11710                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11711                               TG3_TSO_TCP_OPT_LEN;
11712
11713                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11714                        sizeof(tg3_tso_header));
11715                 mss = TG3_TSO_MSS;
11716
11717                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11718                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11719
11720                 /* Set the total length field in the IP header */
11721                 iph->tot_len = htons((u16)(mss + hdr_len));
11722
11723                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11724                               TXD_FLAG_CPU_POST_DMA);
11725
11726                 if (tg3_flag(tp, HW_TSO_1) ||
11727                     tg3_flag(tp, HW_TSO_2) ||
11728                     tg3_flag(tp, HW_TSO_3)) {
11729                         struct tcphdr *th;
11730                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11731                         th = (struct tcphdr *)&tx_data[val];
11732                         th->check = 0;
11733                 } else
11734                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11735
11736                 if (tg3_flag(tp, HW_TSO_3)) {
11737                         mss |= (hdr_len & 0xc) << 12;
11738                         if (hdr_len & 0x10)
11739                                 base_flags |= 0x00000010;
11740                         base_flags |= (hdr_len & 0x3e0) << 5;
11741                 } else if (tg3_flag(tp, HW_TSO_2))
11742                         mss |= hdr_len << 9;
11743                 else if (tg3_flag(tp, HW_TSO_1) ||
11744                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11745                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11746                 } else {
11747                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11748                 }
11749
11750                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11751         } else {
11752                 num_pkts = 1;
11753                 data_off = ETH_HLEN;
11754
11755                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11756                     tx_len > VLAN_ETH_FRAME_LEN)
11757                         base_flags |= TXD_FLAG_JMB_PKT;
11758         }
11759
11760         for (i = data_off; i < tx_len; i++)
11761                 tx_data[i] = (u8) (i & 0xff);
11762
11763         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11764         if (pci_dma_mapping_error(tp->pdev, map)) {
11765                 dev_kfree_skb(skb);
11766                 return -EIO;
11767         }
11768
11769         val = tnapi->tx_prod;
11770         tnapi->tx_buffers[val].skb = skb;
11771         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11772
11773         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11774                rnapi->coal_now);
11775
11776         udelay(10);
11777
11778         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11779
11780         budget = tg3_tx_avail(tnapi);
11781         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11782                             base_flags | TXD_FLAG_END, mss, 0)) {
11783                 tnapi->tx_buffers[val].skb = NULL;
11784                 dev_kfree_skb(skb);
11785                 return -EIO;
11786         }
11787
11788         tnapi->tx_prod++;
11789
11790         /* Sync BD data before updating mailbox */
11791         wmb();
11792
11793         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11794         tr32_mailbox(tnapi->prodmbox);
11795
11796         udelay(10);
11797
11798         /* Poll up to 350 usec to allow enough time on some 10/100 Mbps devices. */
11799         for (i = 0; i < 35; i++) {
11800                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11801                        coal_now);
11802
11803                 udelay(10);
11804
11805                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11806                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11807                 if ((tx_idx == tnapi->tx_prod) &&
11808                     (rx_idx == (rx_start_idx + num_pkts)))
11809                         break;
11810         }
11811
11812         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11813         dev_kfree_skb(skb);
11814
11815         if (tx_idx != tnapi->tx_prod)
11816                 goto out;
11817
11818         if (rx_idx != rx_start_idx + num_pkts)
11819                 goto out;
11820
11821         val = data_off;
11822         while (rx_idx != rx_start_idx) {
11823                 desc = &rnapi->rx_rcb[rx_start_idx++];
11824                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11825                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11826
11827                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11828                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11829                         goto out;
11830
11831                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11832                          - ETH_FCS_LEN;
11833
11834                 if (!tso_loopback) {
11835                         if (rx_len != tx_len)
11836                                 goto out;
11837
11838                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11839                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11840                                         goto out;
11841                         } else {
11842                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11843                                         goto out;
11844                         }
11845                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11846                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11847                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11848                         goto out;
11849                 }
11850
11851                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11852                         rx_data = tpr->rx_std_buffers[desc_idx].data;
11853                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11854                                              mapping);
11855                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11856                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11857                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11858                                              mapping);
11859                 } else
11860                         goto out;
11861
11862                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11863                                             PCI_DMA_FROMDEVICE);
11864
11865                 rx_data += TG3_RX_OFFSET(tp);
11866                 for (i = data_off; i < rx_len; i++, val++) {
11867                         if (*(rx_data + i) != (u8) (val & 0xff))
11868                                 goto out;
11869                 }
11870         }
11871
11872         err = 0;
11873
11874         /* tg3_free_rings will unmap and free the rx_data */
11875 out:
11876         return err;
11877 }
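
/* Illustrative sketch, not part of the driver: the loopback payload
 * above is a position-dependent byte pattern, so a dropped, duplicated
 * or reordered byte is detected, not just outright corruption.  The
 * helper names are hypothetical.
 */
static void example_fill_pattern(u8 *data, int start, int len)
{
        int i;

        for (i = start; i < len; i++)
                data[i] = (u8)(i & 0xff);
}

static bool example_check_pattern(const u8 *data, int start, int len)
{
        int i;

        for (i = start; i < len; i++)
                if (data[i] != (u8)(i & 0xff))
                        return false;

        return true;
}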
11878
11879 #define TG3_STD_LOOPBACK_FAILED         1
11880 #define TG3_JMB_LOOPBACK_FAILED         2
11881 #define TG3_TSO_LOOPBACK_FAILED         4
11882 #define TG3_LOOPBACK_FAILED \
11883         (TG3_STD_LOOPBACK_FAILED | \
11884          TG3_JMB_LOOPBACK_FAILED | \
11885          TG3_TSO_LOOPBACK_FAILED)
11886
11887 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11888 {
11889         int err = -EIO;
11890         u32 eee_cap;
11891         u32 jmb_pkt_sz = 9000;
11892
11893         if (tp->dma_limit)
11894                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11895
11896         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11897         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11898
11899         if (!netif_running(tp->dev)) {
11900                 data[0] = TG3_LOOPBACK_FAILED;
11901                 data[1] = TG3_LOOPBACK_FAILED;
11902                 if (do_extlpbk)
11903                         data[2] = TG3_LOOPBACK_FAILED;
11904                 goto done;
11905         }
11906
11907         err = tg3_reset_hw(tp, 1);
11908         if (err) {
11909                 data[0] = TG3_LOOPBACK_FAILED;
11910                 data[1] = TG3_LOOPBACK_FAILED;
11911                 if (do_extlpbk)
11912                         data[2] = TG3_LOOPBACK_FAILED;
11913                 goto done;
11914         }
11915
11916         if (tg3_flag(tp, ENABLE_RSS)) {
11917                 int i;
11918
11919                 /* Reroute all rx packets to the 1st queue */
11920                 for (i = MAC_RSS_INDIR_TBL_0;
11921                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11922                         tw32(i, 0x0);
11923         }
11924
11925         /* HW erratum: MAC loopback fails in some cases on the 5780.
11926          * Normal traffic and PHY loopback are not affected by this
11927          * erratum.  Also, the MAC loopback test is deprecated for
11928          * all newer ASIC revisions.
11929          */
11930         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11931             !tg3_flag(tp, CPMU_PRESENT)) {
11932                 tg3_mac_loopback(tp, true);
11933
11934                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11935                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11936
11937                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11938                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11939                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11940
11941                 tg3_mac_loopback(tp, false);
11942         }
11943
11944         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11945             !tg3_flag(tp, USE_PHYLIB)) {
11946                 int i;
11947
11948                 tg3_phy_lpbk_set(tp, 0, false);
11949
11950                 /* Wait for link */
11951                 for (i = 0; i < 100; i++) {
11952                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11953                                 break;
11954                         mdelay(1);
11955                 }
11956
11957                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11958                         data[1] |= TG3_STD_LOOPBACK_FAILED;
11959                 if (tg3_flag(tp, TSO_CAPABLE) &&
11960                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11961                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
11962                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11963                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11964                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
11965
11966                 if (do_extlpbk) {
11967                         tg3_phy_lpbk_set(tp, 0, true);
11968
11969                         /* All link indications report up, but the hardware
11970                          * isn't really ready for about 20 msec.  Double it
11971                          * to be sure.
11972                          */
11973                         mdelay(40);
11974
11975                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11976                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
11977                         if (tg3_flag(tp, TSO_CAPABLE) &&
11978                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11979                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11980                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11981                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11982                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11983                 }
11984
11985                 /* Re-enable gphy autopowerdown. */
11986                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11987                         tg3_phy_toggle_apd(tp, true);
11988         }
11989
11990         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11991
11992 done:
11993         tp->phy_flags |= eee_cap;
11994
11995         return err;
11996 }
11997
11998 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11999                           u64 *data)
12000 {
12001         struct tg3 *tp = netdev_priv(dev);
12002         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12003
12004         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12005             tg3_power_up(tp)) {
12006                 etest->flags |= ETH_TEST_FL_FAILED;
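                /* Byte value 0x01 in every byte makes each 64-bit test
                 * result read back nonzero; ethtool conventionally treats
                 * any nonzero result as a failure, so the exact value is
                 * irrelevant here.
                 */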
12007                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12008                 return;
12009         }
12010
12011         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12012
12013         if (tg3_test_nvram(tp) != 0) {
12014                 etest->flags |= ETH_TEST_FL_FAILED;
12015                 data[0] = 1;
12016         }
12017         if (!doextlpbk && tg3_test_link(tp)) {
12018                 etest->flags |= ETH_TEST_FL_FAILED;
12019                 data[1] = 1;
12020         }
12021         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12022                 int err, err2 = 0, irq_sync = 0;
12023
12024                 if (netif_running(dev)) {
12025                         tg3_phy_stop(tp);
12026                         tg3_netif_stop(tp);
12027                         irq_sync = 1;
12028                 }
12029
12030                 tg3_full_lock(tp, irq_sync);
12031
12032                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12033                 err = tg3_nvram_lock(tp);
12034                 tg3_halt_cpu(tp, RX_CPU_BASE);
12035                 if (!tg3_flag(tp, 5705_PLUS))
12036                         tg3_halt_cpu(tp, TX_CPU_BASE);
12037                 if (!err)
12038                         tg3_nvram_unlock(tp);
12039
12040                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12041                         tg3_phy_reset(tp);
12042
12043                 if (tg3_test_registers(tp) != 0) {
12044                         etest->flags |= ETH_TEST_FL_FAILED;
12045                         data[2] = 1;
12046                 }
12047
12048                 if (tg3_test_memory(tp) != 0) {
12049                         etest->flags |= ETH_TEST_FL_FAILED;
12050                         data[3] = 1;
12051                 }
12052
12053                 if (doextlpbk)
12054                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12055
12056                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12057                         etest->flags |= ETH_TEST_FL_FAILED;
12058
12059                 tg3_full_unlock(tp);
12060
12061                 if (tg3_test_interrupt(tp) != 0) {
12062                         etest->flags |= ETH_TEST_FL_FAILED;
12063                         data[7] = 1;
12064                 }
12065
12066                 tg3_full_lock(tp, 0);
12067
12068                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12069                 if (netif_running(dev)) {
12070                         tg3_flag_set(tp, INIT_COMPLETE);
12071                         err2 = tg3_restart_hw(tp, 1);
12072                         if (!err2)
12073                                 tg3_netif_start(tp);
12074                 }
12075
12076                 tg3_full_unlock(tp);
12077
12078                 if (irq_sync && !err2)
12079                         tg3_phy_start(tp);
12080         }
12081         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12082                 tg3_power_down(tp);
12083
12084 }
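
/* Usage note: tg3_self_test() backs "ethtool -t".  For example, with a
 * hypothetical interface name of eth0:
 *
 *      ethtool -t eth0 online    # nvram and link tests only
 *      ethtool -t eth0 offline   # all tests; the device is halted,
 *                                # tested, then restarted
 *
 * The offline case corresponds to ETH_TEST_FL_OFFLINE above.
 */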
12085
12086 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12087 {
12088         struct mii_ioctl_data *data = if_mii(ifr);
12089         struct tg3 *tp = netdev_priv(dev);
12090         int err;
12091
12092         if (tg3_flag(tp, USE_PHYLIB)) {
12093                 struct phy_device *phydev;
12094                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12095                         return -EAGAIN;
12096                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12097                 return phy_mii_ioctl(phydev, ifr, cmd);
12098         }
12099
12100         switch (cmd) {
12101         case SIOCGMIIPHY:
12102                 data->phy_id = tp->phy_addr;
12103
12104                 /* fallthru */
12105         case SIOCGMIIREG: {
12106                 u32 mii_regval;
12107
12108                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12109                         break;                  /* We have no PHY */
12110
12111                 if (!netif_running(dev))
12112                         return -EAGAIN;
12113
12114                 spin_lock_bh(&tp->lock);
12115                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12116                 spin_unlock_bh(&tp->lock);
12117
12118                 data->val_out = mii_regval;
12119
12120                 return err;
12121         }
12122
12123         case SIOCSMIIREG:
12124                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12125                         break;                  /* We have no PHY */
12126
12127                 if (!netif_running(dev))
12128                         return -EAGAIN;
12129
12130                 spin_lock_bh(&tp->lock);
12131                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12132                 spin_unlock_bh(&tp->lock);
12133
12134                 return err;
12135
12136         default:
12137                 /* do nothing */
12138                 break;
12139         }
12140         return -EOPNOTSUPP;
12141 }
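
/* Illustrative user-space sketch (guarded out of the build): reading a
 * PHY register through the SIOCGMIIPHY/SIOCGMIIREG path handled above.
 * The interface name, register choice and omitted error handling are
 * all illustrative.
 */
#if 0
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>
#include <string.h>

static int read_bmsr(int sock)
{
        struct ifreq ifr;
        struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);

        ioctl(sock, SIOCGMIIPHY, &ifr);         /* fills mii->phy_id */
        mii->reg_num = MII_BMSR;
        ioctl(sock, SIOCGMIIREG, &ifr);         /* fills mii->val_out */

        return mii->val_out;
}
#endif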
12142
12143 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12144 {
12145         struct tg3 *tp = netdev_priv(dev);
12146
12147         memcpy(ec, &tp->coal, sizeof(*ec));
12148         return 0;
12149 }
12150
12151 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12152 {
12153         struct tg3 *tp = netdev_priv(dev);
12154         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12155         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12156
12157         if (!tg3_flag(tp, 5705_PLUS)) {
12158                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12159                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12160                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12161                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12162         }
12163
12164         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12165             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12166             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12167             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12168             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12169             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12170             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12171             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12172             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12173             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12174                 return -EINVAL;
12175
12176         /* No rx interrupts will be generated if both are zero */
12177         if ((ec->rx_coalesce_usecs == 0) &&
12178             (ec->rx_max_coalesced_frames == 0))
12179                 return -EINVAL;
12180
12181         /* No tx interrupts will be generated if both are zero */
12182         if ((ec->tx_coalesce_usecs == 0) &&
12183             (ec->tx_max_coalesced_frames == 0))
12184                 return -EINVAL;
12185
12186         /* Only copy relevant parameters, ignore all others. */
12187         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12188         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12189         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12190         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12191         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12192         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12193         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12194         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12195         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12196
12197         if (netif_running(dev)) {
12198                 tg3_full_lock(tp, 0);
12199                 __tg3_set_coalesce(tp, &tp->coal);
12200                 tg3_full_unlock(tp);
12201         }
12202         return 0;
12203 }
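
/* Usage note: these two hooks back "ethtool -c" and "ethtool -C".  For
 * example (interface name illustrative):
 *
 *      ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * maps to ec->rx_coalesce_usecs and ec->rx_max_coalesced_frames above;
 * parameters this driver does not copy are silently ignored.
 */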
12204
12205 static const struct ethtool_ops tg3_ethtool_ops = {
12206         .get_settings           = tg3_get_settings,
12207         .set_settings           = tg3_set_settings,
12208         .get_drvinfo            = tg3_get_drvinfo,
12209         .get_regs_len           = tg3_get_regs_len,
12210         .get_regs               = tg3_get_regs,
12211         .get_wol                = tg3_get_wol,
12212         .set_wol                = tg3_set_wol,
12213         .get_msglevel           = tg3_get_msglevel,
12214         .set_msglevel           = tg3_set_msglevel,
12215         .nway_reset             = tg3_nway_reset,
12216         .get_link               = ethtool_op_get_link,
12217         .get_eeprom_len         = tg3_get_eeprom_len,
12218         .get_eeprom             = tg3_get_eeprom,
12219         .set_eeprom             = tg3_set_eeprom,
12220         .get_ringparam          = tg3_get_ringparam,
12221         .set_ringparam          = tg3_set_ringparam,
12222         .get_pauseparam         = tg3_get_pauseparam,
12223         .set_pauseparam         = tg3_set_pauseparam,
12224         .self_test              = tg3_self_test,
12225         .get_strings            = tg3_get_strings,
12226         .set_phys_id            = tg3_set_phys_id,
12227         .get_ethtool_stats      = tg3_get_ethtool_stats,
12228         .get_coalesce           = tg3_get_coalesce,
12229         .set_coalesce           = tg3_set_coalesce,
12230         .get_sset_count         = tg3_get_sset_count,
12231         .get_rxnfc              = tg3_get_rxnfc,
12232         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12233         .get_rxfh_indir         = tg3_get_rxfh_indir,
12234         .set_rxfh_indir         = tg3_set_rxfh_indir,
12235 };
12236
12237 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12238                                                 struct rtnl_link_stats64 *stats)
12239 {
12240         struct tg3 *tp = netdev_priv(dev);
12241
12242         if (!tp->hw_stats)
12243                 return &tp->net_stats_prev;
12244
12245         spin_lock_bh(&tp->lock);
12246         tg3_get_nstats(tp, stats);
12247         spin_unlock_bh(&tp->lock);
12248
12249         return stats;
12250 }
12251
12252 static void tg3_set_rx_mode(struct net_device *dev)
12253 {
12254         struct tg3 *tp = netdev_priv(dev);
12255
12256         if (!netif_running(dev))
12257                 return;
12258
12259         tg3_full_lock(tp, 0);
12260         __tg3_set_rx_mode(dev);
12261         tg3_full_unlock(tp);
12262 }
12263
12264 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12265                                int new_mtu)
12266 {
12267         dev->mtu = new_mtu;
12268
12269         if (new_mtu > ETH_DATA_LEN) {
12270                 if (tg3_flag(tp, 5780_CLASS)) {
12271                         netdev_update_features(dev);
12272                         tg3_flag_clear(tp, TSO_CAPABLE);
12273                 } else {
12274                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12275                 }
12276         } else {
12277                 if (tg3_flag(tp, 5780_CLASS)) {
12278                         tg3_flag_set(tp, TSO_CAPABLE);
12279                         netdev_update_features(dev);
12280                 }
12281                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12282         }
12283 }
12284
12285 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12286 {
12287         struct tg3 *tp = netdev_priv(dev);
12288         int err, reset_phy = 0;
12289
12290         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12291                 return -EINVAL;
12292
12293         if (!netif_running(dev)) {
12294                 /* The new MTU will simply take effect later,
12295                  * when the device is brought up.
12296                  */
12297                 tg3_set_mtu(dev, tp, new_mtu);
12298                 return 0;
12299         }
12300
12301         tg3_phy_stop(tp);
12302
12303         tg3_netif_stop(tp);
12304
12305         tg3_full_lock(tp, 1);
12306
12307         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12308
12309         tg3_set_mtu(dev, tp, new_mtu);
12310
12311         /* Reset the PHY, otherwise the read DMA engine will be left in
12312          * a mode that splits all requests into 256-byte chunks.
12313          */
12314         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12315                 reset_phy = 1;
12316
12317         err = tg3_restart_hw(tp, reset_phy);
12318
12319         if (!err)
12320                 tg3_netif_start(tp);
12321
12322         tg3_full_unlock(tp);
12323
12324         if (!err)
12325                 tg3_phy_start(tp);
12326
12327         return err;
12328 }
12329
12330 static const struct net_device_ops tg3_netdev_ops = {
12331         .ndo_open               = tg3_open,
12332         .ndo_stop               = tg3_close,
12333         .ndo_start_xmit         = tg3_start_xmit,
12334         .ndo_get_stats64        = tg3_get_stats64,
12335         .ndo_validate_addr      = eth_validate_addr,
12336         .ndo_set_rx_mode        = tg3_set_rx_mode,
12337         .ndo_set_mac_address    = tg3_set_mac_addr,
12338         .ndo_do_ioctl           = tg3_ioctl,
12339         .ndo_tx_timeout         = tg3_tx_timeout,
12340         .ndo_change_mtu         = tg3_change_mtu,
12341         .ndo_fix_features       = tg3_fix_features,
12342         .ndo_set_features       = tg3_set_features,
12343 #ifdef CONFIG_NET_POLL_CONTROLLER
12344         .ndo_poll_controller    = tg3_poll_controller,
12345 #endif
12346 };
12347
12348 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12349 {
12350         u32 cursize, val, magic;
12351
12352         tp->nvram_size = EEPROM_CHIP_SIZE;
12353
12354         if (tg3_nvram_read(tp, 0, &magic) != 0)
12355                 return;
12356
12357         if ((magic != TG3_EEPROM_MAGIC) &&
12358             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12359             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12360                 return;
12361
12362         /*
12363          * Size the chip by reading offsets at increasing powers of two.
12364          * When we encounter our validation signature, we know the addressing
12365          * has wrapped around, and thus have our chip size.
12366          */
12367         cursize = 0x10;
12368
12369         while (cursize < tp->nvram_size) {
12370                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12371                         return;
12372
12373                 if (val == magic)
12374                         break;
12375
12376                 cursize <<= 1;
12377         }
12378
12379         tp->nvram_size = cursize;
12380 }
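
/* Worked example of the wrap-around probe above (sizes illustrative):
 * on a 16 KB part, reads at 0x10, 0x20, 0x40, ... return ordinary data
 * until cursize reaches 0x4000; addressing then wraps to offset 0, the
 * read returns the magic signature again, and nvram_size is set to
 * 0x4000.
 */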
12381
12382 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12383 {
12384         u32 val;
12385
12386         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12387                 return;
12388
12389         /* Selfboot format */
12390         if (val != TG3_EEPROM_MAGIC) {
12391                 tg3_get_eeprom_size(tp);
12392                 return;
12393         }
12394
12395         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12396                 if (val != 0) {
12397                         /* We want the 16-bit value at offset 0xf2.
12398                          * tg3_nvram_read() reads from NVRAM and byteswaps
12399                          * the data according to the byteswapping settings
12400                          * used for all other register accesses, which
12401                          * guarantees that the value we want always lands
12402                          * in the lower 16 bits.  However, NVRAM itself is
12403                          * little-endian, so the data returned by the read
12404                          * is always opposite the endianness of the CPU.
12405                          * The 16-bit byteswap below brings the value back
12406                          * to CPU endianness.
12407                          */
12408                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12409                         return;
12410                 }
12411         }
12412         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12413 }
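
/* Worked example of the swab16() above (a 512 KB part is assumed):
 * NVRAM stores the size in KB as a little-endian 16-bit value at
 * offset 0xf2, here 0x0200.  After tg3_nvram_read()'s register-style
 * byteswapping, the low 16 bits of val hold those bytes in the order
 * opposite the CPU's, so swab16() restores 0x0200 = 512, and
 * 512 * 1024 gives the final nvram_size.
 */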
12414
12415 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12416 {
12417         u32 nvcfg1;
12418
12419         nvcfg1 = tr32(NVRAM_CFG1);
12420         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12421                 tg3_flag_set(tp, FLASH);
12422         } else {
12423                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12424                 tw32(NVRAM_CFG1, nvcfg1);
12425         }
12426
12427         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12428             tg3_flag(tp, 5780_CLASS)) {
12429                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12430                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12431                         tp->nvram_jedecnum = JEDEC_ATMEL;
12432                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12433                         tg3_flag_set(tp, NVRAM_BUFFERED);
12434                         break;
12435                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12436                         tp->nvram_jedecnum = JEDEC_ATMEL;
12437                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12438                         break;
12439                 case FLASH_VENDOR_ATMEL_EEPROM:
12440                         tp->nvram_jedecnum = JEDEC_ATMEL;
12441                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12442                         tg3_flag_set(tp, NVRAM_BUFFERED);
12443                         break;
12444                 case FLASH_VENDOR_ST:
12445                         tp->nvram_jedecnum = JEDEC_ST;
12446                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12447                         tg3_flag_set(tp, NVRAM_BUFFERED);
12448                         break;
12449                 case FLASH_VENDOR_SAIFUN:
12450                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12451                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12452                         break;
12453                 case FLASH_VENDOR_SST_SMALL:
12454                 case FLASH_VENDOR_SST_LARGE:
12455                         tp->nvram_jedecnum = JEDEC_SST;
12456                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12457                         break;
12458                 }
12459         } else {
12460                 tp->nvram_jedecnum = JEDEC_ATMEL;
12461                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12462                 tg3_flag_set(tp, NVRAM_BUFFERED);
12463         }
12464 }
12465
12466 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12467 {
12468         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12469         case FLASH_5752PAGE_SIZE_256:
12470                 tp->nvram_pagesize = 256;
12471                 break;
12472         case FLASH_5752PAGE_SIZE_512:
12473                 tp->nvram_pagesize = 512;
12474                 break;
12475         case FLASH_5752PAGE_SIZE_1K:
12476                 tp->nvram_pagesize = 1024;
12477                 break;
12478         case FLASH_5752PAGE_SIZE_2K:
12479                 tp->nvram_pagesize = 2048;
12480                 break;
12481         case FLASH_5752PAGE_SIZE_4K:
12482                 tp->nvram_pagesize = 4096;
12483                 break;
12484         case FLASH_5752PAGE_SIZE_264:
12485                 tp->nvram_pagesize = 264;
12486                 break;
12487         case FLASH_5752PAGE_SIZE_528:
12488                 tp->nvram_pagesize = 528;
12489                 break;
12490         }
12491 }
12492
12493 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12494 {
12495         u32 nvcfg1;
12496
12497         nvcfg1 = tr32(NVRAM_CFG1);
12498
12499         /* NVRAM protection for TPM */
12500         if (nvcfg1 & (1 << 27))
12501                 tg3_flag_set(tp, PROTECTED_NVRAM);
12502
12503         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12504         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12505         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12506                 tp->nvram_jedecnum = JEDEC_ATMEL;
12507                 tg3_flag_set(tp, NVRAM_BUFFERED);
12508                 break;
12509         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12510                 tp->nvram_jedecnum = JEDEC_ATMEL;
12511                 tg3_flag_set(tp, NVRAM_BUFFERED);
12512                 tg3_flag_set(tp, FLASH);
12513                 break;
12514         case FLASH_5752VENDOR_ST_M45PE10:
12515         case FLASH_5752VENDOR_ST_M45PE20:
12516         case FLASH_5752VENDOR_ST_M45PE40:
12517                 tp->nvram_jedecnum = JEDEC_ST;
12518                 tg3_flag_set(tp, NVRAM_BUFFERED);
12519                 tg3_flag_set(tp, FLASH);
12520                 break;
12521         }
12522
12523         if (tg3_flag(tp, FLASH)) {
12524                 tg3_nvram_get_pagesize(tp, nvcfg1);
12525         } else {
12526                 /* For EEPROM, set pagesize to the maximum EEPROM size */
12527                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12528
12529                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12530                 tw32(NVRAM_CFG1, nvcfg1);
12531         }
12532 }
12533
12534 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12535 {
12536         u32 nvcfg1, protect = 0;
12537
12538         nvcfg1 = tr32(NVRAM_CFG1);
12539
12540         /* NVRAM protection for TPM */
12541         if (nvcfg1 & (1 << 27)) {
12542                 tg3_flag_set(tp, PROTECTED_NVRAM);
12543                 protect = 1;
12544         }
12545
12546         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12547         switch (nvcfg1) {
12548         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12549         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12550         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12551         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12552                 tp->nvram_jedecnum = JEDEC_ATMEL;
12553                 tg3_flag_set(tp, NVRAM_BUFFERED);
12554                 tg3_flag_set(tp, FLASH);
12555                 tp->nvram_pagesize = 264;
12556                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12557                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12558                         tp->nvram_size = (protect ? 0x3e200 :
12559                                           TG3_NVRAM_SIZE_512KB);
12560                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12561                         tp->nvram_size = (protect ? 0x1f200 :
12562                                           TG3_NVRAM_SIZE_256KB);
12563                 else
12564                         tp->nvram_size = (protect ? 0x1f200 :
12565                                           TG3_NVRAM_SIZE_128KB);
12566                 break;
12567         case FLASH_5752VENDOR_ST_M45PE10:
12568         case FLASH_5752VENDOR_ST_M45PE20:
12569         case FLASH_5752VENDOR_ST_M45PE40:
12570                 tp->nvram_jedecnum = JEDEC_ST;
12571                 tg3_flag_set(tp, NVRAM_BUFFERED);
12572                 tg3_flag_set(tp, FLASH);
12573                 tp->nvram_pagesize = 256;
12574                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12575                         tp->nvram_size = (protect ?
12576                                           TG3_NVRAM_SIZE_64KB :
12577                                           TG3_NVRAM_SIZE_128KB);
12578                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12579                         tp->nvram_size = (protect ?
12580                                           TG3_NVRAM_SIZE_64KB :
12581                                           TG3_NVRAM_SIZE_256KB);
12582                 else
12583                         tp->nvram_size = (protect ?
12584                                           TG3_NVRAM_SIZE_128KB :
12585                                           TG3_NVRAM_SIZE_512KB);
12586                 break;
12587         }
12588 }
12589
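/* 5787-class variant (also used for 5784/5785).  EEPROM, buffered
 * Atmel flash and ST M45PExx parts are recognized; the size is left
 * for the generic size probe in tg3_nvram_init() to discover.
 */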
12590 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12591 {
12592         u32 nvcfg1;
12593
12594         nvcfg1 = tr32(NVRAM_CFG1);
12595
12596         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12597         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12598         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12599         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12600         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12601                 tp->nvram_jedecnum = JEDEC_ATMEL;
12602                 tg3_flag_set(tp, NVRAM_BUFFERED);
12603                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12604
12605                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12606                 tw32(NVRAM_CFG1, nvcfg1);
12607                 break;
12608         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12609         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12610         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12611         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12612                 tp->nvram_jedecnum = JEDEC_ATMEL;
12613                 tg3_flag_set(tp, NVRAM_BUFFERED);
12614                 tg3_flag_set(tp, FLASH);
12615                 tp->nvram_pagesize = 264;
12616                 break;
12617         case FLASH_5752VENDOR_ST_M45PE10:
12618         case FLASH_5752VENDOR_ST_M45PE20:
12619         case FLASH_5752VENDOR_ST_M45PE40:
12620                 tp->nvram_jedecnum = JEDEC_ST;
12621                 tg3_flag_set(tp, NVRAM_BUFFERED);
12622                 tg3_flag_set(tp, FLASH);
12623                 tp->nvram_pagesize = 256;
12624                 break;
12625         }
12626 }
12627
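/* 5761 variant.  When the TPM protects the NVRAM, the usable size is
 * read back from the NVRAM_ADDR_LOCKOUT register instead of being
 * inferred from the part number.
 */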
12628 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12629 {
12630         u32 nvcfg1, protect = 0;
12631
12632         nvcfg1 = tr32(NVRAM_CFG1);
12633
12634         /* NVRAM protection for TPM */
12635         if (nvcfg1 & (1 << 27)) {
12636                 tg3_flag_set(tp, PROTECTED_NVRAM);
12637                 protect = 1;
12638         }
12639
12640         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12641         switch (nvcfg1) {
12642         case FLASH_5761VENDOR_ATMEL_ADB021D:
12643         case FLASH_5761VENDOR_ATMEL_ADB041D:
12644         case FLASH_5761VENDOR_ATMEL_ADB081D:
12645         case FLASH_5761VENDOR_ATMEL_ADB161D:
12646         case FLASH_5761VENDOR_ATMEL_MDB021D:
12647         case FLASH_5761VENDOR_ATMEL_MDB041D:
12648         case FLASH_5761VENDOR_ATMEL_MDB081D:
12649         case FLASH_5761VENDOR_ATMEL_MDB161D:
12650                 tp->nvram_jedecnum = JEDEC_ATMEL;
12651                 tg3_flag_set(tp, NVRAM_BUFFERED);
12652                 tg3_flag_set(tp, FLASH);
12653                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12654                 tp->nvram_pagesize = 256;
12655                 break;
12656         case FLASH_5761VENDOR_ST_A_M45PE20:
12657         case FLASH_5761VENDOR_ST_A_M45PE40:
12658         case FLASH_5761VENDOR_ST_A_M45PE80:
12659         case FLASH_5761VENDOR_ST_A_M45PE16:
12660         case FLASH_5761VENDOR_ST_M_M45PE20:
12661         case FLASH_5761VENDOR_ST_M_M45PE40:
12662         case FLASH_5761VENDOR_ST_M_M45PE80:
12663         case FLASH_5761VENDOR_ST_M_M45PE16:
12664                 tp->nvram_jedecnum = JEDEC_ST;
12665                 tg3_flag_set(tp, NVRAM_BUFFERED);
12666                 tg3_flag_set(tp, FLASH);
12667                 tp->nvram_pagesize = 256;
12668                 break;
12669         }
12670
12671         if (protect) {
12672                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12673         } else {
12674                 switch (nvcfg1) {
12675                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12676                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12677                 case FLASH_5761VENDOR_ST_A_M45PE16:
12678                 case FLASH_5761VENDOR_ST_M_M45PE16:
12679                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12680                         break;
12681                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12682                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12683                 case FLASH_5761VENDOR_ST_A_M45PE80:
12684                 case FLASH_5761VENDOR_ST_M_M45PE80:
12685                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12686                         break;
12687                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12688                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12689                 case FLASH_5761VENDOR_ST_A_M45PE40:
12690                 case FLASH_5761VENDOR_ST_M_M45PE40:
12691                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12692                         break;
12693                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12694                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12695                 case FLASH_5761VENDOR_ST_A_M45PE20:
12696                 case FLASH_5761VENDOR_ST_M_M45PE20:
12697                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12698                         break;
12699                 }
12700         }
12701 }
12702
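/* 5906 parts always use a buffered Atmel AT24C512-style EEPROM. */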
12703 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12704 {
12705         tp->nvram_jedecnum = JEDEC_ATMEL;
12706         tg3_flag_set(tp, NVRAM_BUFFERED);
12707         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12708 }
12709
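/* 57780/57765-class variant.  Unrecognized vendor codes mark the
 * device NO_NVRAM; otherwise the page size is decoded, and anything
 * other than the 264/528-byte DataFlash geometries disables address
 * translation via NO_NVRAM_ADDR_TRANS.
 */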
12710 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12711 {
12712         u32 nvcfg1;
12713
12714         nvcfg1 = tr32(NVRAM_CFG1);
12715
12716         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12717         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12718         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12719                 tp->nvram_jedecnum = JEDEC_ATMEL;
12720                 tg3_flag_set(tp, NVRAM_BUFFERED);
12721                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12722
12723                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12724                 tw32(NVRAM_CFG1, nvcfg1);
12725                 return;
12726         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12727         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12728         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12729         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12730         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12731         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12732         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12733                 tp->nvram_jedecnum = JEDEC_ATMEL;
12734                 tg3_flag_set(tp, NVRAM_BUFFERED);
12735                 tg3_flag_set(tp, FLASH);
12736
12737                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12738                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12739                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12740                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12741                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12742                         break;
12743                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12744                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12745                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12746                         break;
12747                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12748                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12749                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12750                         break;
12751                 }
12752                 break;
12753         case FLASH_5752VENDOR_ST_M45PE10:
12754         case FLASH_5752VENDOR_ST_M45PE20:
12755         case FLASH_5752VENDOR_ST_M45PE40:
12756                 tp->nvram_jedecnum = JEDEC_ST;
12757                 tg3_flag_set(tp, NVRAM_BUFFERED);
12758                 tg3_flag_set(tp, FLASH);
12759
12760                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12761                 case FLASH_5752VENDOR_ST_M45PE10:
12762                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12763                         break;
12764                 case FLASH_5752VENDOR_ST_M45PE20:
12765                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12766                         break;
12767                 case FLASH_5752VENDOR_ST_M45PE40:
12768                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12769                         break;
12770                 }
12771                 break;
12772         default:
12773                 tg3_flag_set(tp, NO_NVRAM);
12774                 return;
12775         }
12776
12777         tg3_nvram_get_pagesize(tp, nvcfg1);
12778         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12779                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12780 }
12781
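/* 5717/5719 variant.  Some Atmel and ST parts deliberately leave
 * nvram_size unset so that the generic size probe can detect it.
 */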
12783 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12784 {
12785         u32 nvcfg1;
12786
12787         nvcfg1 = tr32(NVRAM_CFG1);
12788
12789         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12790         case FLASH_5717VENDOR_ATMEL_EEPROM:
12791         case FLASH_5717VENDOR_MICRO_EEPROM:
12792                 tp->nvram_jedecnum = JEDEC_ATMEL;
12793                 tg3_flag_set(tp, NVRAM_BUFFERED);
12794                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12795
12796                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12797                 tw32(NVRAM_CFG1, nvcfg1);
12798                 return;
12799         case FLASH_5717VENDOR_ATMEL_MDB011D:
12800         case FLASH_5717VENDOR_ATMEL_ADB011B:
12801         case FLASH_5717VENDOR_ATMEL_ADB011D:
12802         case FLASH_5717VENDOR_ATMEL_MDB021D:
12803         case FLASH_5717VENDOR_ATMEL_ADB021B:
12804         case FLASH_5717VENDOR_ATMEL_ADB021D:
12805         case FLASH_5717VENDOR_ATMEL_45USPT:
12806                 tp->nvram_jedecnum = JEDEC_ATMEL;
12807                 tg3_flag_set(tp, NVRAM_BUFFERED);
12808                 tg3_flag_set(tp, FLASH);
12809
12810                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12811                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12812                         /* Detect size with tg3_nvram_get_size() */
12813                         break;
12814                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12815                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12816                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12817                         break;
12818                 default:
12819                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12820                         break;
12821                 }
12822                 break;
12823         case FLASH_5717VENDOR_ST_M_M25PE10:
12824         case FLASH_5717VENDOR_ST_A_M25PE10:
12825         case FLASH_5717VENDOR_ST_M_M45PE10:
12826         case FLASH_5717VENDOR_ST_A_M45PE10:
12827         case FLASH_5717VENDOR_ST_M_M25PE20:
12828         case FLASH_5717VENDOR_ST_A_M25PE20:
12829         case FLASH_5717VENDOR_ST_M_M45PE20:
12830         case FLASH_5717VENDOR_ST_A_M45PE20:
12831         case FLASH_5717VENDOR_ST_25USPT:
12832         case FLASH_5717VENDOR_ST_45USPT:
12833                 tp->nvram_jedecnum = JEDEC_ST;
12834                 tg3_flag_set(tp, NVRAM_BUFFERED);
12835                 tg3_flag_set(tp, FLASH);
12836
12837                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12838                 case FLASH_5717VENDOR_ST_M_M25PE20:
12839                 case FLASH_5717VENDOR_ST_M_M45PE20:
12840                         /* Detect size with tg3_nvram_get_size() */
12841                         break;
12842                 case FLASH_5717VENDOR_ST_A_M25PE20:
12843                 case FLASH_5717VENDOR_ST_A_M45PE20:
12844                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12845                         break;
12846                 default:
12847                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12848                         break;
12849                 }
12850                 break;
12851         default:
12852                 tg3_flag_set(tp, NO_NVRAM);
12853                 return;
12854         }
12855
12856         tg3_nvram_get_pagesize(tp, nvcfg1);
12857         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12858                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12859 }
12860
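/* 5720 variant.  FLASH_5720_EEPROM_HD/_LD appear to be high- and
 * low-density EEPROM strappings (sized as AT24C512 vs. AT24C02);
 * flash parts are sized from the vendor code as in the other routines.
 */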
12861 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12862 {
12863         u32 nvcfg1, nvmpinstrp;
12864
12865         nvcfg1 = tr32(NVRAM_CFG1);
12866         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12867
12868         switch (nvmpinstrp) {
12869         case FLASH_5720_EEPROM_HD:
12870         case FLASH_5720_EEPROM_LD:
12871                 tp->nvram_jedecnum = JEDEC_ATMEL;
12872                 tg3_flag_set(tp, NVRAM_BUFFERED);
12873
12874                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12875                 tw32(NVRAM_CFG1, nvcfg1);
12876                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12877                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12878                 else
12879                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12880                 return;
12881         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12882         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12883         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12884         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12885         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12886         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12887         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12888         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12889         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12890         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12891         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12892         case FLASH_5720VENDOR_ATMEL_45USPT:
12893                 tp->nvram_jedecnum = JEDEC_ATMEL;
12894                 tg3_flag_set(tp, NVRAM_BUFFERED);
12895                 tg3_flag_set(tp, FLASH);
12896
12897                 switch (nvmpinstrp) {
12898                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12899                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12900                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12901                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12902                         break;
12903                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12904                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12905                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12906                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12907                         break;
12908                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12909                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12910                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12911                         break;
12912                 default:
12913                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12914                         break;
12915                 }
12916                 break;
12917         case FLASH_5720VENDOR_M_ST_M25PE10:
12918         case FLASH_5720VENDOR_M_ST_M45PE10:
12919         case FLASH_5720VENDOR_A_ST_M25PE10:
12920         case FLASH_5720VENDOR_A_ST_M45PE10:
12921         case FLASH_5720VENDOR_M_ST_M25PE20:
12922         case FLASH_5720VENDOR_M_ST_M45PE20:
12923         case FLASH_5720VENDOR_A_ST_M25PE20:
12924         case FLASH_5720VENDOR_A_ST_M45PE20:
12925         case FLASH_5720VENDOR_M_ST_M25PE40:
12926         case FLASH_5720VENDOR_M_ST_M45PE40:
12927         case FLASH_5720VENDOR_A_ST_M25PE40:
12928         case FLASH_5720VENDOR_A_ST_M45PE40:
12929         case FLASH_5720VENDOR_M_ST_M25PE80:
12930         case FLASH_5720VENDOR_M_ST_M45PE80:
12931         case FLASH_5720VENDOR_A_ST_M25PE80:
12932         case FLASH_5720VENDOR_A_ST_M45PE80:
12933         case FLASH_5720VENDOR_ST_25USPT:
12934         case FLASH_5720VENDOR_ST_45USPT:
12935                 tp->nvram_jedecnum = JEDEC_ST;
12936                 tg3_flag_set(tp, NVRAM_BUFFERED);
12937                 tg3_flag_set(tp, FLASH);
12938
12939                 switch (nvmpinstrp) {
12940                 case FLASH_5720VENDOR_M_ST_M25PE20:
12941                 case FLASH_5720VENDOR_M_ST_M45PE20:
12942                 case FLASH_5720VENDOR_A_ST_M25PE20:
12943                 case FLASH_5720VENDOR_A_ST_M45PE20:
12944                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12945                         break;
12946                 case FLASH_5720VENDOR_M_ST_M25PE40:
12947                 case FLASH_5720VENDOR_M_ST_M45PE40:
12948                 case FLASH_5720VENDOR_A_ST_M25PE40:
12949                 case FLASH_5720VENDOR_A_ST_M45PE40:
12950                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12951                         break;
12952                 case FLASH_5720VENDOR_M_ST_M25PE80:
12953                 case FLASH_5720VENDOR_M_ST_M45PE80:
12954                 case FLASH_5720VENDOR_A_ST_M25PE80:
12955                 case FLASH_5720VENDOR_A_ST_M45PE80:
12956                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12957                         break;
12958                 default:
12959                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12960                         break;
12961                 }
12962                 break;
12963         default:
12964                 tg3_flag_set(tp, NO_NVRAM);
12965                 return;
12966         }
12967
12968         tg3_nvram_get_pagesize(tp, nvcfg1);
12969         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12970                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12971 }
12972
12973 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
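/* Reset the EEPROM FSM and enable automatic SEEPROM access, then take
 * the NVRAM lock and dispatch to the matching per-ASIC info routine
 * above.  5700/5701 have no NVRAM interface and fall back to
 * tg3_get_eeprom_size().
 */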
12974 static void __devinit tg3_nvram_init(struct tg3 *tp)
12975 {
12976         tw32_f(GRC_EEPROM_ADDR,
12977                (EEPROM_ADDR_FSM_RESET |
12978                 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12979                  EEPROM_ADDR_CLKPERD_SHIFT)));
12980
12981         msleep(1);
12982
12983         /* Enable serial EEPROM (SEEPROM) accesses. */
12984         tw32_f(GRC_LOCAL_CTRL,
12985                tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12986         udelay(100);
12987
12988         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12989             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12990                 tg3_flag_set(tp, NVRAM);
12991
12992                 if (tg3_nvram_lock(tp)) {
12993                         netdev_warn(tp->dev,
12994                                     "Cannot get nvram lock, %s failed\n",
12995                                     __func__);
12996                         return;
12997                 }
12998                 tg3_enable_nvram_access(tp);
12999
13000                 tp->nvram_size = 0;
13001
13002                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13003                         tg3_get_5752_nvram_info(tp);
13004                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13005                         tg3_get_5755_nvram_info(tp);
13006                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13007                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13008                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13009                         tg3_get_5787_nvram_info(tp);
13010                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13011                         tg3_get_5761_nvram_info(tp);
13012                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13013                         tg3_get_5906_nvram_info(tp);
13014                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13015                          tg3_flag(tp, 57765_CLASS))
13016                         tg3_get_57780_nvram_info(tp);
13017                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13018                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13019                         tg3_get_5717_nvram_info(tp);
13020                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13021                         tg3_get_5720_nvram_info(tp);
13022                 else
13023                         tg3_get_nvram_info(tp);
13024
13025                 if (tp->nvram_size == 0)
13026                         tg3_get_nvram_size(tp);
13027
13028                 tg3_disable_nvram_access(tp);
13029                 tg3_nvram_unlock(tp);
13030
13031         } else {
13032                 tg3_flag_clear(tp, NVRAM);
13033                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13034
13035                 tg3_get_eeprom_size(tp);
13036         }
13037 }
13038
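/* Fallback mapping from PCI subsystem IDs to PHY IDs, used by
 * tg3_phy_probe() for boards whose eeprom carries no usable PHY ID.
 * A phy_id of 0 denotes a serdes board.
 */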
13039 struct subsys_tbl_ent {
13040         u16 subsys_vendor, subsys_devid;
13041         u32 phy_id;
13042 };
13043
13044 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13045         /* Broadcom boards. */
13046         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13047           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13048         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13049           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13050         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13051           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13052         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13053           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13054         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13055           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13056         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13057           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13058         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13059           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13060         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13061           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13062         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13063           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13064         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13065           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13066         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13067           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13068
13069         /* 3com boards. */
13070         { TG3PCI_SUBVENDOR_ID_3COM,
13071           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13072         { TG3PCI_SUBVENDOR_ID_3COM,
13073           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13074         { TG3PCI_SUBVENDOR_ID_3COM,
13075           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13076         { TG3PCI_SUBVENDOR_ID_3COM,
13077           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13078         { TG3PCI_SUBVENDOR_ID_3COM,
13079           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13080
13081         /* DELL boards. */
13082         { TG3PCI_SUBVENDOR_ID_DELL,
13083           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13084         { TG3PCI_SUBVENDOR_ID_DELL,
13085           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13086         { TG3PCI_SUBVENDOR_ID_DELL,
13087           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13088         { TG3PCI_SUBVENDOR_ID_DELL,
13089           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13090
13091         /* Compaq boards. */
13092         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13093           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13094         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13095           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13096         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13097           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13098         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13099           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13100         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13101           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13102
13103         /* IBM boards. */
13104         { TG3PCI_SUBVENDOR_ID_IBM,
13105           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13106 };
13107
13108 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13109 {
13110         int i;
13111
13112         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13113                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13114                      tp->pdev->subsystem_vendor) &&
13115                     (subsys_id_to_phy_id[i].subsys_devid ==
13116                      tp->pdev->subsystem_device))
13117                         return &subsys_id_to_phy_id[i];
13118         }
13119         return NULL;
13120 }
13121
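/* Pull the bootcode-provided hardware configuration out of NIC SRAM:
 * PHY ID, LED mode, WOL capability, ASF/APE enables and assorted
 * workaround flags.  The 5906 keeps an abbreviated copy in
 * VCPU_CFGSHDW instead.
 */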
13122 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13123 {
13124         u32 val;
13125
13126         tp->phy_id = TG3_PHY_ID_INVALID;
13127         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13128
13129         /* Assume an onboard (LOM) device and WOL capability by default. */
13130         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13131         tg3_flag_set(tp, WOL_CAP);
13132
13133         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13134                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13135                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13136                         tg3_flag_set(tp, IS_NIC);
13137                 }
13138                 val = tr32(VCPU_CFGSHDW);
13139                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13140                         tg3_flag_set(tp, ASPM_WORKAROUND);
13141                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13142                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13143                         tg3_flag_set(tp, WOL_ENABLE);
13144                         device_set_wakeup_enable(&tp->pdev->dev, true);
13145                 }
13146                 goto done;
13147         }
13148
13149         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13150         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13151                 u32 nic_cfg, led_cfg;
13152                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13153                 int eeprom_phy_serdes = 0;
13154
13155                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13156                 tp->nic_sram_data_cfg = nic_cfg;
13157
13158                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13159                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13160                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13161                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13162                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13163                     (ver > 0) && (ver < 0x100))
13164                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13165
13166                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13167                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13168
13169                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13170                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13171                         eeprom_phy_serdes = 1;
13172
13173                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13174                 if (nic_phy_id != 0) {
13175                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13176                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13177
13178                         eeprom_phy_id  = (id1 >> 16) << 10;
13179                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13180                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13181                 } else
13182                         eeprom_phy_id = 0;
13183
13184                 tp->phy_id = eeprom_phy_id;
13185                 if (eeprom_phy_serdes) {
13186                         if (!tg3_flag(tp, 5705_PLUS))
13187                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13188                         else
13189                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13190                 }
13191
13192                 if (tg3_flag(tp, 5750_PLUS))
13193                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13194                                     SHASTA_EXT_LED_MODE_MASK);
13195                 else
13196                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13197
13198                 switch (led_cfg) {
13199                 default:
13200                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13201                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13202                         break;
13203
13204                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13205                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13206                         break;
13207
13208                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13209                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13210
13211                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13212                          * read from some older 5700/5701 bootcode.
13213                          */
13214                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13215                             ASIC_REV_5700 ||
13216                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13217                             ASIC_REV_5701)
13218                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13219
13220                         break;
13221
13222                 case SHASTA_EXT_LED_SHARED:
13223                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13224                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13225                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13226                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13227                                                  LED_CTRL_MODE_PHY_2);
13228                         break;
13229
13230                 case SHASTA_EXT_LED_MAC:
13231                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13232                         break;
13233
13234                 case SHASTA_EXT_LED_COMBO:
13235                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13236                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13237                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13238                                                  LED_CTRL_MODE_PHY_2);
13239                         break;
13240
13241                 }
13242
13243                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13244                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13245                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13246                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13247
13248                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13249                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13250
13251                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13252                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13253                         if ((tp->pdev->subsystem_vendor ==
13254                              PCI_VENDOR_ID_ARIMA) &&
13255                             (tp->pdev->subsystem_device == 0x205a ||
13256                              tp->pdev->subsystem_device == 0x2063))
13257                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13258                 } else {
13259                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13260                         tg3_flag_set(tp, IS_NIC);
13261                 }
13262
13263                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13264                         tg3_flag_set(tp, ENABLE_ASF);
13265                         if (tg3_flag(tp, 5750_PLUS))
13266                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13267                 }
13268
13269                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13270                     tg3_flag(tp, 5750_PLUS))
13271                         tg3_flag_set(tp, ENABLE_APE);
13272
13273                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13274                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13275                         tg3_flag_clear(tp, WOL_CAP);
13276
13277                 if (tg3_flag(tp, WOL_CAP) &&
13278                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13279                         tg3_flag_set(tp, WOL_ENABLE);
13280                         device_set_wakeup_enable(&tp->pdev->dev, true);
13281                 }
13282
13283                 if (cfg2 & (1 << 17))
13284                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13285
13286                 /* SerDes signal pre-emphasis in register 0x590 is set
13287                  * by the bootcode if bit 18 is set. */
13288                 if (cfg2 & (1 << 18))
13289                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13290
13291                 if ((tg3_flag(tp, 57765_PLUS) ||
13292                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13293                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13294                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13295                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13296
13297                 if (tg3_flag(tp, PCI_EXPRESS) &&
13298                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13299                     !tg3_flag(tp, 57765_PLUS)) {
13300                         u32 cfg3;
13301
13302                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13303                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13304                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13305                 }
13306
13307                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13308                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13309                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13310                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13311                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13312                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13313         }
13314 done:
13315         if (tg3_flag(tp, WOL_CAP))
13316                 device_set_wakeup_enable(&tp->pdev->dev,
13317                                          tg3_flag(tp, WOL_ENABLE));
13318         else
13319                 device_set_wakeup_capable(&tp->pdev->dev, false);
13320 }
13321
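/* Kick one OTP controller command and poll up to 1 ms (100 x 10 us)
 * for completion.  Returns 0 on success, -EBUSY on timeout.
 */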
13322 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13323 {
13324         int i;
13325         u32 val;
13326
13327         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13328         tw32(OTP_CTRL, cmd);
13329
13330         /* Wait for up to 1 ms for command to execute. */
13331         for (i = 0; i < 100; i++) {
13332                 val = tr32(OTP_STATUS);
13333                 if (val & OTP_STATUS_CMD_DONE)
13334                         break;
13335                 udelay(10);
13336         }
13337
13338         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13339 }
13340
13341 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13342  * configuration is a 32-bit value that straddles the alignment boundary.
13343  * We do two 32-bit reads and then shift and merge the results.
13344  */
13345 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13346 {
13347         u32 bhalf_otp, thalf_otp;
13348
13349         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13350
13351         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13352                 return 0;
13353
13354         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13355
13356         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13357                 return 0;
13358
13359         thalf_otp = tr32(OTP_READ_DATA);
13360
13361         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13362
13363         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13364                 return 0;
13365
13366         bhalf_otp = tr32(OTP_READ_DATA);
13367
13368         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13369 }
13370
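/* Seed link_config with an everything-advertised autoneg default:
 * 10/100-only PHYs drop the 1000baseT modes, and serdes devices
 * advertise FIBRE instead of the twisted-pair modes.
 */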
13371 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13372 {
13373         u32 adv = ADVERTISED_Autoneg;
13374
13375         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13376                 adv |= ADVERTISED_1000baseT_Half |
13377                        ADVERTISED_1000baseT_Full;
13378
13379         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13380                 adv |= ADVERTISED_100baseT_Half |
13381                        ADVERTISED_100baseT_Full |
13382                        ADVERTISED_10baseT_Half |
13383                        ADVERTISED_10baseT_Full |
13384                        ADVERTISED_TP;
13385         else
13386                 adv |= ADVERTISED_FIBRE;
13387
13388         tp->link_config.advertising = adv;
13389         tp->link_config.speed = SPEED_UNKNOWN;
13390         tp->link_config.duplex = DUPLEX_UNKNOWN;
13391         tp->link_config.autoneg = AUTONEG_ENABLE;
13392         tp->link_config.active_speed = SPEED_UNKNOWN;
13393         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13394
13395         tp->old_link = -1;
13396 }
13397
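/* Establish the PHY identity and initial link setup.  The hardware
 * PHY ID is trusted first, then the eeprom-derived value, then the
 * subsystem-ID table; for copper PHYs without ASF/APE firmware, a PHY
 * reset and autoneg restart are issued when the link is down.
 */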
13398 static int __devinit tg3_phy_probe(struct tg3 *tp)
13399 {
13400         u32 hw_phy_id_1, hw_phy_id_2;
13401         u32 hw_phy_id, hw_phy_id_masked;
13402         int err;
13403
13404         /* flow control autonegotiation is default behavior */
13405         tg3_flag_set(tp, PAUSE_AUTONEG);
13406         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13407
13408         if (tg3_flag(tp, USE_PHYLIB))
13409                 return tg3_phy_init(tp);
13410
13411         /* Reading the PHY ID register can conflict with ASF
13412          * firmware access to the PHY hardware.
13413          */
13414         err = 0;
13415         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13416                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13417         } else {
13418                 /* Now read the physical PHY_ID from the chip and verify
13419                  * that it is sane.  If it doesn't look good, we fall back
13420                  * to the PHY_ID found in the eeprom area and, failing
13421                  * that, to the hard-coded subsystem-ID table.
13422                  */
13423                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13424                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13425
13426                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13427                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13428                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13429
13430                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13431         }
13432
13433         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13434                 tp->phy_id = hw_phy_id;
13435                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13436                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13437                 else
13438                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13439         } else {
13440                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13441                         /* Do nothing, phy ID already set up in
13442                          * tg3_get_eeprom_hw_cfg().
13443                          */
13444                 } else {
13445                         struct subsys_tbl_ent *p;
13446
13447                         /* No eeprom signature?  Try the hardcoded
13448                          * subsys device table.
13449                          */
13450                         p = tg3_lookup_by_subsys(tp);
13451                         if (!p)
13452                                 return -ENODEV;
13453
13454                         tp->phy_id = p->phy_id;
13455                         if (!tp->phy_id ||
13456                             tp->phy_id == TG3_PHY_ID_BCM8002)
13457                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13458                 }
13459         }
13460
13461         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13462             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13463              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13464              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13465               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13466              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13467               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13468                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13469
13470         tg3_phy_init_link_config(tp);
13471
13472         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13473             !tg3_flag(tp, ENABLE_APE) &&
13474             !tg3_flag(tp, ENABLE_ASF)) {
13475                 u32 bmsr, dummy;
13476
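                /* BMSR latches link-down events; read it twice so the
                 * second read reflects the current link state.
                 */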
13477                 tg3_readphy(tp, MII_BMSR, &bmsr);
13478                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13479                     (bmsr & BMSR_LSTATUS))
13480                         goto skip_phy_reset;
13481
13482                 err = tg3_phy_reset(tp);
13483                 if (err)
13484                         return err;
13485
13486                 tg3_phy_set_wirespeed(tp);
13487
13488                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13489                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13490                                             tp->link_config.flowctrl);
13491
13492                         tg3_writephy(tp, MII_BMCR,
13493                                      BMCR_ANENABLE | BMCR_ANRESTART);
13494                 }
13495         }
13496
13497 skip_phy_reset:
13498         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13499                 err = tg3_init_5401phy_dsp(tp);
13500                 if (err)
13501                         return err;
13502
13503                 err = tg3_init_5401phy_dsp(tp);
13504         }
13505
13506         return err;
13507 }
13508
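/* Parse the PCI VPD read-only section for the board part number and,
 * on Dell boards (MFR_ID "1028"), a vendor firmware string.  Without
 * usable VPD, synthesize a part number from the PCI device ID.
 */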
13509 static void __devinit tg3_read_vpd(struct tg3 *tp)
13510 {
13511         u8 *vpd_data;
13512         unsigned int block_end, rosize, len;
13513         u32 vpdlen;
13514         int j, i = 0;
13515
13516         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13517         if (!vpd_data)
13518                 goto out_no_vpd;
13519
13520         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13521         if (i < 0)
13522                 goto out_not_found;
13523
13524         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13525         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13526         i += PCI_VPD_LRDT_TAG_SIZE;
13527
13528         if (block_end > vpdlen)
13529                 goto out_not_found;
13530
13531         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13532                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13533         if (j > 0) {
13534                 len = pci_vpd_info_field_size(&vpd_data[j]);
13535
13536                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13537                 if (j + len > block_end || len != 4 ||
13538                     memcmp(&vpd_data[j], "1028", 4))
13539                         goto partno;
13540
13541                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13542                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13543                 if (j < 0)
13544                         goto partno;
13545
13546                 len = pci_vpd_info_field_size(&vpd_data[j]);
13547
13548                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13549                 if (j + len > block_end)
13550                         goto partno;
13551
13552                 memcpy(tp->fw_ver, &vpd_data[j], len);
13553                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13554         }
13555
13556 partno:
13557         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13558                                       PCI_VPD_RO_KEYWORD_PARTNO);
13559         if (i < 0)
13560                 goto out_not_found;
13561
13562         len = pci_vpd_info_field_size(&vpd_data[i]);
13563
13564         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13565         if (len > TG3_BPN_SIZE ||
13566             (len + i) > vpdlen)
13567                 goto out_not_found;
13568
13569         memcpy(tp->board_part_number, &vpd_data[i], len);
13570
13571 out_not_found:
13572         kfree(vpd_data);
13573         if (tp->board_part_number[0])
13574                 return;
13575
13576 out_no_vpd:
13577         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13578                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13579                         strcpy(tp->board_part_number, "BCM5717");
13580                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13581                         strcpy(tp->board_part_number, "BCM5718");
13582                 else
13583                         goto nomatch;
13584         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13585                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13586                         strcpy(tp->board_part_number, "BCM57780");
13587                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13588                         strcpy(tp->board_part_number, "BCM57760");
13589                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13590                         strcpy(tp->board_part_number, "BCM57790");
13591                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13592                         strcpy(tp->board_part_number, "BCM57788");
13593                 else
13594                         goto nomatch;
13595         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13596                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13597                         strcpy(tp->board_part_number, "BCM57761");
13598                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13599                         strcpy(tp->board_part_number, "BCM57765");
13600                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13601                         strcpy(tp->board_part_number, "BCM57781");
13602                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13603                         strcpy(tp->board_part_number, "BCM57785");
13604                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13605                         strcpy(tp->board_part_number, "BCM57791");
13606                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13607                         strcpy(tp->board_part_number, "BCM57795");
13608                 else
13609                         goto nomatch;
13610         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13611                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13612                         strcpy(tp->board_part_number, "BCM57762");
13613                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13614                         strcpy(tp->board_part_number, "BCM57766");
13615                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13616                         strcpy(tp->board_part_number, "BCM57782");
13617                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13618                         strcpy(tp->board_part_number, "BCM57786");
13619                 else
13620                         goto nomatch;
13621         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13622                 strcpy(tp->board_part_number, "BCM95906");
13623         } else {
13624 nomatch:
13625                 strcpy(tp->board_part_number, "none");
13626         }
13627 }
13628
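/* A firmware image is considered valid when the top six bits of its
 * first word carry the 0x0c000000 signature and its second word is 0.
 */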
13629 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13630 {
13631         u32 val;
13632
13633         if (tg3_nvram_read(tp, offset, &val) ||
13634             (val & 0xfc000000) != 0x0c000000 ||
13635             tg3_nvram_read(tp, offset + 4, &val) ||
13636             val != 0)
13637                 return 0;
13638
13639         return 1;
13640 }
13641
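/* Append the bootcode version to fw_ver.  Newer images store a
 * 16-byte version string at a pointer past the header; older ones
 * encode major/minor in the TG3_NVM_PTREV_BCVER word.
 */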
13642 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13643 {
13644         u32 val, offset, start, ver_offset;
13645         int i, dst_off;
13646         bool newver = false;
13647
13648         if (tg3_nvram_read(tp, 0xc, &offset) ||
13649             tg3_nvram_read(tp, 0x4, &start))
13650                 return;
13651
13652         offset = tg3_nvram_logical_addr(tp, offset);
13653
13654         if (tg3_nvram_read(tp, offset, &val))
13655                 return;
13656
13657         if ((val & 0xfc000000) == 0x0c000000) {
13658                 if (tg3_nvram_read(tp, offset + 4, &val))
13659                         return;
13660
13661                 if (val == 0)
13662                         newver = true;
13663         }
13664
13665         dst_off = strlen(tp->fw_ver);
13666
13667         if (newver) {
13668                 if (TG3_VER_SIZE - dst_off < 16 ||
13669                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13670                         return;
13671
13672                 offset = offset + ver_offset - start;
13673                 for (i = 0; i < 16; i += 4) {
13674                         __be32 v;
13675                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13676                                 return;
13677
13678                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13679                 }
13680         } else {
13681                 u32 major, minor;
13682
13683                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13684                         return;
13685
13686                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13687                         TG3_NVM_BCVER_MAJSFT;
13688                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13689                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13690                          "v%d.%02d", major, minor);
13691         }
13692 }
13693
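/* Hardware-selfboot images keep major/minor in TG3_NVM_HWSB_CFG1. */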
13694 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13695 {
13696         u32 val, major, minor;
13697
13698         /* Use native endian representation */
13699         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13700                 return;
13701
13702         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13703                 TG3_NVM_HWSB_CFG1_MAJSFT;
13704         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13705                 TG3_NVM_HWSB_CFG1_MINSFT;
13706
13707         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13708 }
13709
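/* Decode a selfboot (format 1) version: major/minor plus an optional
 * build letter ('a' + build - 1) appended to fw_ver.
 */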
13710 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13711 {
13712         u32 offset, major, minor, build;
13713
13714         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13715
13716         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13717                 return;
13718
13719         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13720         case TG3_EEPROM_SB_REVISION_0:
13721                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13722                 break;
13723         case TG3_EEPROM_SB_REVISION_2:
13724                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13725                 break;
13726         case TG3_EEPROM_SB_REVISION_3:
13727                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13728                 break;
13729         case TG3_EEPROM_SB_REVISION_4:
13730                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13731                 break;
13732         case TG3_EEPROM_SB_REVISION_5:
13733                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13734                 break;
13735         case TG3_EEPROM_SB_REVISION_6:
13736                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13737                 break;
13738         default:
13739                 return;
13740         }
13741
13742         if (tg3_nvram_read(tp, offset, &val))
13743                 return;
13744
13745         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13746                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13747         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13748                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13749         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13750
13751         if (minor > 99 || build > 26)
13752                 return;
13753
13754         offset = strlen(tp->fw_ver);
13755         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13756                  " v%d.%02d", major, minor);
13757
13758         if (build > 0) {
13759                 offset = strlen(tp->fw_ver);
13760                 if (offset < TG3_VER_SIZE - 1)
13761                         tp->fw_ver[offset] = 'a' + build - 1;
13762         }
13763 }
13764
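/* Walk the NVM directory for the ASF init-code entry and, if the
 * image validates, append its 16-byte version string to fw_ver.
 */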
13765 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13766 {
13767         u32 val, offset, start;
13768         int i, vlen;
13769
13770         for (offset = TG3_NVM_DIR_START;
13771              offset < TG3_NVM_DIR_END;
13772              offset += TG3_NVM_DIRENT_SIZE) {
13773                 if (tg3_nvram_read(tp, offset, &val))
13774                         return;
13775
13776                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13777                         break;
13778         }
13779
13780         if (offset == TG3_NVM_DIR_END)
13781                 return;
13782
13783         if (!tg3_flag(tp, 5705_PLUS))
13784                 start = 0x08000000;
13785         else if (tg3_nvram_read(tp, offset - 4, &start))
13786                 return;
13787
13788         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13789             !tg3_fw_img_is_valid(tp, offset) ||
13790             tg3_nvram_read(tp, offset + 8, &val))
13791                 return;
13792
13793         offset += val - start;
13794
13795         vlen = strlen(tp->fw_ver);
13796
13797         tp->fw_ver[vlen++] = ',';
13798         tp->fw_ver[vlen++] = ' ';
13799
13800         for (i = 0; i < 4; i++) {
13801                 __be32 v;
13802                 if (tg3_nvram_read_be32(tp, offset, &v))
13803                         return;
13804
13805                 offset += sizeof(v);
13806
13807                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13808                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13809                         break;
13810                 }
13811
13812                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13813                 vlen += sizeof(v);
13814         }
13815 }
13816
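/* Append the APE management firmware version (NCSI or DASH flavor)
 * once the APE signals readiness through its shared registers.
 */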
13817 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13818 {
13819         int vlen;
13820         u32 apedata;
13821         char *fwtype;
13822
13823         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13824                 return;
13825
13826         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13827         if (apedata != APE_SEG_SIG_MAGIC)
13828                 return;
13829
13830         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13831         if (!(apedata & APE_FW_STATUS_READY))
13832                 return;
13833
13834         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13835
13836         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13837                 tg3_flag_set(tp, APE_HAS_NCSI);
13838                 fwtype = "NCSI";
13839         } else {
13840                 fwtype = "DASH";
13841         }
13842
13843         vlen = strlen(tp->fw_ver);
13844
13845         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13846                  fwtype,
13847                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13848                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13849                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13850                  (apedata & APE_FW_VERSION_BLDMSK));
13851 }
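
/* Illustration of the decode above, assuming the MAJ/MIN/REV/BLD
 * masks select successive bytes from most to least significant (as
 * their names suggest): an APE_FW_VERSION word of 0x01020304 with
 * the NCSI feature bit set would append " NCSI v1.2.3.4" to
 * tp->fw_ver.
 */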
13852
13853 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13854 {
13855         u32 val;
13856         bool vpd_vers = false;
13857
13858         if (tp->fw_ver[0] != 0)
13859                 vpd_vers = true;
13860
13861         if (tg3_flag(tp, NO_NVRAM)) {
13862                 strcat(tp->fw_ver, "sb");
13863                 return;
13864         }
13865
13866         if (tg3_nvram_read(tp, 0, &val))
13867                 return;
13868
13869         if (val == TG3_EEPROM_MAGIC)
13870                 tg3_read_bc_ver(tp);
13871         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13872                 tg3_read_sb_ver(tp, val);
13873         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13874                 tg3_read_hwsb_ver(tp);
13875         else
13876                 return;
13877
13878         if (vpd_vers)
13879                 goto done;
13880
13881         if (tg3_flag(tp, ENABLE_APE)) {
13882                 if (tg3_flag(tp, ENABLE_ASF))
13883                         tg3_read_dash_ver(tp);
13884         } else if (tg3_flag(tp, ENABLE_ASF)) {
13885                 tg3_read_mgmtfw_ver(tp);
13886         }
13887
13888 done:
13889         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13890 }
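
/* Dispatch summary for the function above: the first NVRAM word
 * selects the reader -- TG3_EEPROM_MAGIC means bootcode
 * (tg3_read_bc_ver), the FW magic under its mask means a self-boot
 * image (tg3_read_sb_ver), and the HW magic means a hardware
 * self-boot image (tg3_read_hwsb_ver).  Management firmware
 * versions (DASH/NCSI via the APE, or ASF) are appended only when
 * no VPD-supplied version was already present.
 */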
13891
13892 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13893 {
13894         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13895                 return TG3_RX_RET_MAX_SIZE_5717;
13896         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13897                 return TG3_RX_RET_MAX_SIZE_5700;
13898         else
13899                 return TG3_RX_RET_MAX_SIZE_5705;
13900 }
13901
13902 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13903         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13904         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13905         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13906         { },
13907 };
13908
13909 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13910 {
13911         struct pci_dev *peer;
13912         unsigned int func, devnr = tp->pdev->devfn & ~7;
13913
13914         for (func = 0; func < 8; func++) {
13915                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13916                 if (peer && peer != tp->pdev)
13917                         break;
13918                 pci_dev_put(peer);
13919         }
13920         /* The 5704 can be configured in single-port mode; set peer to
13921          * tp->pdev in that case.
13922          */
13923         if (!peer) {
13924                 peer = tp->pdev;
13925                 return peer;
13926         }
13927
13928         /*
13929          * We don't need to keep the refcount elevated; there's no way
13930          * to remove one half of this device without removing the other.
13931          */
13932         pci_dev_put(peer);
13933
13934         return peer;
13935 }
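
/* Worked example of the devfn arithmetic above (devfn value
 * assumed): for a 5704 at devfn 0x21 (slot 4, function 1),
 * devnr = 0x21 & ~7 = 0x20, and the loop probes functions 0
 * through 7 of that slot (devfn 0x20..0x27) until it finds a
 * function other than tp->pdev itself -- here function 0.
 */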
13936
13937 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13938 {
13939         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13940         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13941                 u32 reg;
13942
13943                 /* All devices that use the alternate
13944                  * ASIC REV location have a CPMU.
13945                  */
13946                 tg3_flag_set(tp, CPMU_PRESENT);
13947
13948                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13949                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13950                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13951                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13952                         reg = TG3PCI_GEN2_PRODID_ASICREV;
13953                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13954                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13955                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13956                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13957                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13958                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13959                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13960                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13961                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13962                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13963                         reg = TG3PCI_GEN15_PRODID_ASICREV;
13964                 else
13965                         reg = TG3PCI_PRODID_ASICREV;
13966
13967                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
13968         }
13969
13970         /* Wrong chip ID in 5752 A0. This code can be removed later
13971          * as A0 is not in production.
13972          */
13973         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13974                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13975
13976         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13977             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13978             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13979                 tg3_flag_set(tp, 5717_PLUS);
13980
13981         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13982             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13983                 tg3_flag_set(tp, 57765_CLASS);
13984
13985         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
13986                 tg3_flag_set(tp, 57765_PLUS);
13987
13988         /* Intentionally exclude ASIC_REV_5906 */
13989         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13990             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13991             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13992             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13993             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13994             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13995             tg3_flag(tp, 57765_PLUS))
13996                 tg3_flag_set(tp, 5755_PLUS);
13997
13998         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13999             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14000                 tg3_flag_set(tp, 5780_CLASS);
14001
14002         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14003             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14004             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14005             tg3_flag(tp, 5755_PLUS) ||
14006             tg3_flag(tp, 5780_CLASS))
14007                 tg3_flag_set(tp, 5750_PLUS);
14008
14009         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14010             tg3_flag(tp, 5750_PLUS))
14011                 tg3_flag_set(tp, 5705_PLUS);
14012 }
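
/* The flags set above nest: 5717_PLUS and 57765_CLASS each imply
 * 57765_PLUS; 57765_PLUS implies 5755_PLUS; 5755_PLUS, together
 * with 5780_CLASS and the plain 5750/5752/5906 ASICs, implies
 * 5750_PLUS; and 5750_PLUS (or plain 5705) implies 5705_PLUS.
 * Later code can therefore test the broadest applicable flag
 * instead of enumerating ASIC revisions.
 */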
14013
14014 static int __devinit tg3_get_invariants(struct tg3 *tp)
14015 {
14016         u32 misc_ctrl_reg;
14017         u32 pci_state_reg, grc_misc_cfg;
14018         u32 val;
14019         u16 pci_cmd;
14020         int err;
14021
14022         /* Force memory write invalidate off.  If we leave it on,
14023          * then on 5700_BX chips we have to enable a workaround.
14024          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14025          * to match the cacheline size.  The Broadcom driver has this
14026          * workaround but turns MWI off at all times, so it never uses
14027          * it.  This suggests that the workaround is insufficient.
14028          */
14029         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14030         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14031         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14032
14033         /* Important! -- Make sure register accesses are byteswapped
14034          * correctly.  Also, for those chips that require it, make
14035          * sure that indirect register accesses are enabled before
14036          * the first operation.
14037          */
14038         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14039                               &misc_ctrl_reg);
14040         tp->misc_host_ctrl |= (misc_ctrl_reg &
14041                                MISC_HOST_CTRL_CHIPREV);
14042         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14043                                tp->misc_host_ctrl);
14044
14045         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14046
14047         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14048          * we need to disable memory and use config. cycles
14049          * only to access all registers. The 5702/03 chips
14050          * can mistakenly decode the special cycles from the
14051          * ICH chipsets as memory write cycles, causing corruption
14052          * of register and memory space. Only certain ICH bridges
14053          * will drive special cycles with non-zero data during the
14054          * address phase which can fall within the 5703's address
14055          * range. This is not an ICH bug as the PCI spec allows
14056          * non-zero address during special cycles. However, only
14057          * these ICH bridges are known to drive non-zero addresses
14058          * during special cycles.
14059          *
14060          * Since special cycles do not cross PCI bridges, we only
14061          * enable this workaround if the 5703 is on the secondary
14062          * bus of these ICH bridges.
14063          */
14064         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14065             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14066                 static struct tg3_dev_id {
14067                         u32     vendor;
14068                         u32     device;
14069                         u32     rev;
14070                 } ich_chipsets[] = {
14071                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14072                           PCI_ANY_ID },
14073                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14074                           PCI_ANY_ID },
14075                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14076                           0xa },
14077                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14078                           PCI_ANY_ID },
14079                         { },
14080                 };
14081                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14082                 struct pci_dev *bridge = NULL;
14083
14084                 while (pci_id->vendor != 0) {
14085                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14086                                                 bridge);
14087                         if (!bridge) {
14088                                 pci_id++;
14089                                 continue;
14090                         }
14091                         if (pci_id->rev != PCI_ANY_ID) {
14092                                 if (bridge->revision > pci_id->rev)
14093                                         continue;
14094                         }
14095                         if (bridge->subordinate &&
14096                             (bridge->subordinate->number ==
14097                              tp->pdev->bus->number)) {
14098                                 tg3_flag_set(tp, ICH_WORKAROUND);
14099                                 pci_dev_put(bridge);
14100                                 break;
14101                         }
14102                 }
14103         }
14104
14105         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14106                 static struct tg3_dev_id {
14107                         u32     vendor;
14108                         u32     device;
14109                 } bridge_chipsets[] = {
14110                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14111                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14112                         { },
14113                 };
14114                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14115                 struct pci_dev *bridge = NULL;
14116
14117                 while (pci_id->vendor != 0) {
14118                         bridge = pci_get_device(pci_id->vendor,
14119                                                 pci_id->device,
14120                                                 bridge);
14121                         if (!bridge) {
14122                                 pci_id++;
14123                                 continue;
14124                         }
14125                         if (bridge->subordinate &&
14126                             (bridge->subordinate->number <=
14127                              tp->pdev->bus->number) &&
14128                             (bridge->subordinate->subordinate >=
14129                              tp->pdev->bus->number)) {
14130                                 tg3_flag_set(tp, 5701_DMA_BUG);
14131                                 pci_dev_put(bridge);
14132                                 break;
14133                         }
14134                 }
14135         }
14136
14137         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14138          * DMA addresses wider than 40 bits. This bridge may have additional
14139          * 57xx devices behind it in some 4-port NIC designs, for example.
14140          * Any tg3 device found behind the bridge will also need the 40-bit
14141          * DMA workaround.
14142          */
14143         if (tg3_flag(tp, 5780_CLASS)) {
14144                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14145                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14146         } else {
14147                 struct pci_dev *bridge = NULL;
14148
14149                 do {
14150                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14151                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14152                                                 bridge);
14153                         if (bridge && bridge->subordinate &&
14154                             (bridge->subordinate->number <=
14155                              tp->pdev->bus->number) &&
14156                             (bridge->subordinate->subordinate >=
14157                              tp->pdev->bus->number)) {
14158                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14159                                 pci_dev_put(bridge);
14160                                 break;
14161                         }
14162                 } while (bridge);
14163         }
14164
14165         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14166             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14167                 tp->pdev_peer = tg3_find_peer(tp);
14168
14169         /* Determine TSO capabilities */
14170         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14171                 ; /* Do nothing. HW bug. */
14172         else if (tg3_flag(tp, 57765_PLUS))
14173                 tg3_flag_set(tp, HW_TSO_3);
14174         else if (tg3_flag(tp, 5755_PLUS) ||
14175                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14176                 tg3_flag_set(tp, HW_TSO_2);
14177         else if (tg3_flag(tp, 5750_PLUS)) {
14178                 tg3_flag_set(tp, HW_TSO_1);
14179                 tg3_flag_set(tp, TSO_BUG);
14180                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14181                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14182                         tg3_flag_clear(tp, TSO_BUG);
14183         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14184                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14185                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14186                 tg3_flag_set(tp, TSO_BUG);
14187                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14188                         tp->fw_needed = FIRMWARE_TG3TSO5;
14189                 else
14190                         tp->fw_needed = FIRMWARE_TG3TSO;
14191         }
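
/* TSO selection above, summarized: 5719 A0 gets no TSO at all (hw
 * bug); 57765_PLUS parts use HW_TSO_3; 5755_PLUS parts and the
 * 5906 use HW_TSO_2; remaining 5750_PLUS parts use HW_TSO_1 and
 * keep TSO_BUG unless they are a 5750 rev C2 or later; and older
 * parts -- excluding the 5700, 5701 and 5705 A0 -- fall back to
 * firmware TSO (FIRMWARE_TG3TSO5 on the 5705, otherwise
 * FIRMWARE_TG3TSO) and also carry TSO_BUG.
 */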
14192
14193         /* Selectively allow TSO based on operating conditions */
14194         if (tg3_flag(tp, HW_TSO_1) ||
14195             tg3_flag(tp, HW_TSO_2) ||
14196             tg3_flag(tp, HW_TSO_3) ||
14197             tp->fw_needed) {
14198                 /* For firmware TSO, assume ASF is disabled.
14199                  * We'll disable TSO later if we discover ASF
14200                  * is enabled in tg3_get_eeprom_hw_cfg().
14201                  */
14202                 tg3_flag_set(tp, TSO_CAPABLE);
14203         } else {
14204                 tg3_flag_clear(tp, TSO_CAPABLE);
14205                 tg3_flag_clear(tp, TSO_BUG);
14206                 tp->fw_needed = NULL;
14207         }
14208
14209         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14210                 tp->fw_needed = FIRMWARE_TG3;
14211
14212         tp->irq_max = 1;
14213
14214         if (tg3_flag(tp, 5750_PLUS)) {
14215                 tg3_flag_set(tp, SUPPORT_MSI);
14216                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14217                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14218                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14219                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14220                      tp->pdev_peer == tp->pdev))
14221                         tg3_flag_clear(tp, SUPPORT_MSI);
14222
14223                 if (tg3_flag(tp, 5755_PLUS) ||
14224                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14225                         tg3_flag_set(tp, 1SHOT_MSI);
14226                 }
14227
14228                 if (tg3_flag(tp, 57765_PLUS)) {
14229                         tg3_flag_set(tp, SUPPORT_MSIX);
14230                         tp->irq_max = TG3_IRQ_MAX_VECS;
14231                         tg3_rss_init_dflt_indir_tbl(tp);
14232                 }
14233         }
14234
14235         if (tg3_flag(tp, 5755_PLUS))
14236                 tg3_flag_set(tp, SHORT_DMA_BUG);
14237
14238         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14239                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14240
14241         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14242             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14243             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14244                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14245
14246         if (tg3_flag(tp, 57765_PLUS) &&
14247             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14248                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14249
14250         if (!tg3_flag(tp, 5705_PLUS) ||
14251             tg3_flag(tp, 5780_CLASS) ||
14252             tg3_flag(tp, USE_JUMBO_BDFLAG))
14253                 tg3_flag_set(tp, JUMBO_CAPABLE);
14254
14255         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14256                               &pci_state_reg);
14257
14258         if (pci_is_pcie(tp->pdev)) {
14259                 u16 lnkctl;
14260
14261                 tg3_flag_set(tp, PCI_EXPRESS);
14262
14263                 pci_read_config_word(tp->pdev,
14264                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14265                                      &lnkctl);
14266                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14267                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14268                             ASIC_REV_5906) {
14269                                 tg3_flag_clear(tp, HW_TSO_2);
14270                                 tg3_flag_clear(tp, TSO_CAPABLE);
14271                         }
14272                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14273                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14274                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14275                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14276                                 tg3_flag_set(tp, CLKREQ_BUG);
14277                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14278                         tg3_flag_set(tp, L1PLLPD_EN);
14279                 }
14280         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14281                 /* BCM5785 devices are effectively PCIe devices, and should
14282                  * follow PCIe codepaths, but do not have a PCIe capabilities
14283                  * section.
14284                  */
14285                 tg3_flag_set(tp, PCI_EXPRESS);
14286         } else if (!tg3_flag(tp, 5705_PLUS) ||
14287                    tg3_flag(tp, 5780_CLASS)) {
14288                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14289                 if (!tp->pcix_cap) {
14290                         dev_err(&tp->pdev->dev,
14291                                 "Cannot find PCI-X capability, aborting\n");
14292                         return -EIO;
14293                 }
14294
14295                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14296                         tg3_flag_set(tp, PCIX_MODE);
14297         }
14298
14299         /* If we have an AMD 762 or VIA K8T800 chipset, write
14300          * reordering to the mailbox registers done by the host
14301          * controller can cause major troubles.  We read back from
14302          * every mailbox register write to force the writes to be
14303          * posted to the chip in order.
14304          */
14305         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14306             !tg3_flag(tp, PCI_EXPRESS))
14307                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14308
14309         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14310                              &tp->pci_cacheline_sz);
14311         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14312                              &tp->pci_lat_timer);
14313         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14314             tp->pci_lat_timer < 64) {
14315                 tp->pci_lat_timer = 64;
14316                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14317                                       tp->pci_lat_timer);
14318         }
14319
14320         /* Important! -- It is critical that the PCI-X hw workaround
14321          * situation is decided before the first MMIO register access.
14322          */
14323         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14324                 /* 5700 BX chips need to have their TX producer index
14325                  * mailboxes written twice to workaround a bug.
14326                  */
14327                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14328
14329                 /* If we are in PCI-X mode, enable the register write workaround.
14330                  *
14331                  * The workaround is to use indirect register accesses
14332                  * for all chip writes not to mailbox registers.
14333                  */
14334                 if (tg3_flag(tp, PCIX_MODE)) {
14335                         u32 pm_reg;
14336
14337                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14338
14339                         /* The chip can have its power management PCI config
14340                          * space registers clobbered due to this bug.
14341                          * So explicitly force the chip into D0 here.
14342                          */
14343                         pci_read_config_dword(tp->pdev,
14344                                               tp->pm_cap + PCI_PM_CTRL,
14345                                               &pm_reg);
14346                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14347                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14348                         pci_write_config_dword(tp->pdev,
14349                                                tp->pm_cap + PCI_PM_CTRL,
14350                                                pm_reg);
14351
14352                         /* Also, force SERR#/PERR# in PCI command. */
14353                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14354                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14355                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14356                 }
14357         }
14358
14359         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14360                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14361         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14362                 tg3_flag_set(tp, PCI_32BIT);
14363
14364         /* Chip-specific fixup from Broadcom driver */
14365         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14366             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14367                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14368                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14369         }
14370
14371         /* Default fast path register access methods */
14372         tp->read32 = tg3_read32;
14373         tp->write32 = tg3_write32;
14374         tp->read32_mbox = tg3_read32;
14375         tp->write32_mbox = tg3_write32;
14376         tp->write32_tx_mbox = tg3_write32;
14377         tp->write32_rx_mbox = tg3_write32;
14378
14379         /* Various workaround register access methods */
14380         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14381                 tp->write32 = tg3_write_indirect_reg32;
14382         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14383                  (tg3_flag(tp, PCI_EXPRESS) &&
14384                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14385                 /*
14386                  * Back to back register writes can cause problems on these
14387                  * chips, the workaround is to read back all reg writes
14388                  * except those to mailbox regs.
14389                  *
14390                  * See tg3_write_indirect_reg32().
14391                  */
14392                 tp->write32 = tg3_write_flush_reg32;
14393         }
14394
14395         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14396                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14397                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14398                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14399         }
14400
14401         if (tg3_flag(tp, ICH_WORKAROUND)) {
14402                 tp->read32 = tg3_read_indirect_reg32;
14403                 tp->write32 = tg3_write_indirect_reg32;
14404                 tp->read32_mbox = tg3_read_indirect_mbox;
14405                 tp->write32_mbox = tg3_write_indirect_mbox;
14406                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14407                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14408
14409                 iounmap(tp->regs);
14410                 tp->regs = NULL;
14411
14412                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14413                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14414                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14415         }
14416         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14417                 tp->read32_mbox = tg3_read32_mbox_5906;
14418                 tp->write32_mbox = tg3_write32_mbox_5906;
14419                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14420                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14421         }
14422
14423         if (tp->write32 == tg3_write_indirect_reg32 ||
14424             (tg3_flag(tp, PCIX_MODE) &&
14425              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14426               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14427                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14428
14429         /* The memory arbiter has to be enabled in order for SRAM accesses
14430          * to succeed.  Normally on powerup the tg3 chip firmware will make
14431          * sure it is enabled, but other entities such as system netboot
14432          * code might disable it.
14433          */
14434         val = tr32(MEMARB_MODE);
14435         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14436
14437         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14438         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14439             tg3_flag(tp, 5780_CLASS)) {
14440                 if (tg3_flag(tp, PCIX_MODE)) {
14441                         pci_read_config_dword(tp->pdev,
14442                                               tp->pcix_cap + PCI_X_STATUS,
14443                                               &val);
14444                         tp->pci_fn = val & 0x7;
14445                 }
14446         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14447                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14448                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14449                     NIC_SRAM_CPMUSTAT_SIG) {
14450                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14451                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14452                 }
14453         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14454                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14455                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14456                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14457                     NIC_SRAM_CPMUSTAT_SIG) {
14458                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14459                                      TG3_CPMU_STATUS_FSHFT_5719;
14460                 }
14461         }
14462
14463         /* Get eeprom hw config before calling tg3_set_power_state().
14464          * In particular, the TG3_FLAG_IS_NIC flag must be
14465          * determined before calling tg3_set_power_state() so that
14466          * we know whether or not to switch out of Vaux power.
14467          * When the flag is set, it means that GPIO1 is used for eeprom
14468          * write protect and also implies that it is a LOM where GPIOs
14469          * are not used to switch power.
14470          */
14471         tg3_get_eeprom_hw_cfg(tp);
14472
14473         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14474                 tg3_flag_clear(tp, TSO_CAPABLE);
14475                 tg3_flag_clear(tp, TSO_BUG);
14476                 tp->fw_needed = NULL;
14477         }
14478
14479         if (tg3_flag(tp, ENABLE_APE)) {
14480                 /* Allow reads and writes to the
14481                  * APE register and memory space.
14482                  */
14483                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14484                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14485                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14486                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14487                                        pci_state_reg);
14488
14489                 tg3_ape_lock_init(tp);
14490         }
14491
14492         /* Set up tp->grc_local_ctrl before calling
14493          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14494          * will bring 5700's external PHY out of reset.
14495          * It is also used as eeprom write protect on LOMs.
14496          */
14497         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14498         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14499             tg3_flag(tp, EEPROM_WRITE_PROT))
14500                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14501                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14502         /* Unused GPIO3 must be driven as output on 5752 because there
14503          * are no pull-up resistors on unused GPIO pins.
14504          */
14505         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14506                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14507
14508         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14509             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14510             tg3_flag(tp, 57765_CLASS))
14511                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14512
14513         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14514             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14515                 /* Turn off the debug UART. */
14516                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14517                 if (tg3_flag(tp, IS_NIC))
14518                         /* Keep VMain power. */
14519                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14520                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14521         }
14522
14523         /* Switch out of Vaux if it is a NIC */
14524         tg3_pwrsrc_switch_to_vmain(tp);
14525
14526         /* Derive initial jumbo mode from MTU assigned in
14527          * ether_setup() via the alloc_etherdev() call
14528          */
14529         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14530                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14531
14532         /* Determine WakeOnLan speed to use. */
14533         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14534             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14535             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14536             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14537                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14538         } else {
14539                 tg3_flag_set(tp, WOL_SPEED_100MB);
14540         }
14541
14542         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14543                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14544
14545         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
14546         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14547             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14548              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14549              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14550             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14551             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14552                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14553
14554         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14555             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14556                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14557         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14558                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14559
14560         if (tg3_flag(tp, 5705_PLUS) &&
14561             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14562             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14563             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14564             !tg3_flag(tp, 57765_PLUS)) {
14565                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14566                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14567                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14568                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14569                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14570                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14571                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14572                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14573                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14574                 } else
14575                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14576         }
14577
14578         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14579             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14580                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14581                 if (tp->phy_otp == 0)
14582                         tp->phy_otp = TG3_OTP_DEFAULT;
14583         }
14584
14585         if (tg3_flag(tp, CPMU_PRESENT))
14586                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14587         else
14588                 tp->mi_mode = MAC_MI_MODE_BASE;
14589
14590         tp->coalesce_mode = 0;
14591         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14592             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14593                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14594
14595         /* Set these bits to enable the statistics workaround. */
14596         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14597             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14598             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14599                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14600                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14601         }
14602
14603         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14604             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14605                 tg3_flag_set(tp, USE_PHYLIB);
14606
14607         err = tg3_mdio_init(tp);
14608         if (err)
14609                 return err;
14610
14611         /* Initialize data/descriptor byte/word swapping. */
14612         val = tr32(GRC_MODE);
14613         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14614                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14615                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14616                         GRC_MODE_B2HRX_ENABLE |
14617                         GRC_MODE_HTX2B_ENABLE |
14618                         GRC_MODE_HOST_STACKUP);
14619         else
14620                 val &= GRC_MODE_HOST_STACKUP;
14621
14622         tw32(GRC_MODE, val | tp->grc_mode);
14623
14624         tg3_switch_clocks(tp);
14625
14626         /* Clear this out for sanity. */
14627         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14628
14629         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14630                               &pci_state_reg);
14631         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14632             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14633                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14634
14635                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14636                     chiprevid == CHIPREV_ID_5701_B0 ||
14637                     chiprevid == CHIPREV_ID_5701_B2 ||
14638                     chiprevid == CHIPREV_ID_5701_B5) {
14639                         void __iomem *sram_base;
14640
14641                         /* Write some dummy words into the SRAM status block
14642                          * area and see if it reads back correctly.  If the return
14643                          * value is bad, force enable the PCIX workaround.
14644                          */
14645                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14646
14647                         writel(0x00000000, sram_base);
14648                         writel(0x00000000, sram_base + 4);
14649                         writel(0xffffffff, sram_base + 4);
14650                         if (readl(sram_base) != 0x00000000)
14651                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14652                 }
14653         }
14654
14655         udelay(50);
14656         tg3_nvram_init(tp);
14657
14658         grc_misc_cfg = tr32(GRC_MISC_CFG);
14659         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14660
14661         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14662             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14663              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14664                 tg3_flag_set(tp, IS_5788);
14665
14666         if (!tg3_flag(tp, IS_5788) &&
14667             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14668                 tg3_flag_set(tp, TAGGED_STATUS);
14669         if (tg3_flag(tp, TAGGED_STATUS)) {
14670                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14671                                       HOSTCC_MODE_CLRTICK_TXBD);
14672
14673                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14674                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14675                                        tp->misc_host_ctrl);
14676         }
14677
14678         /* Preserve the APE MAC_MODE bits */
14679         if (tg3_flag(tp, ENABLE_APE))
14680                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14681         else
14682                 tp->mac_mode = 0;
14683
14684         /* these are limited to 10/100 only */
14685         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14686              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14687             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14688              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14689              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14690               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14691               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14692             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14693              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14694               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14695               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14696             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14697             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14698             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14699             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14700                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14701
14702         err = tg3_phy_probe(tp);
14703         if (err) {
14704                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14705                 /* ... but do not return immediately ... */
14706                 tg3_mdio_fini(tp);
14707         }
14708
14709         tg3_read_vpd(tp);
14710         tg3_read_fw_ver(tp);
14711
14712         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14713                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14714         } else {
14715                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14716                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14717                 else
14718                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14719         }
14720
14721         /* 5700 {AX,BX} chips have a broken status block link
14722          * change bit implementation, so we must use the
14723          * status register in those cases.
14724          */
14725         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14726                 tg3_flag_set(tp, USE_LINKCHG_REG);
14727         else
14728                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14729
14730         /* The led_ctrl is set during tg3_phy_probe; here we might
14731          * have to force the link status polling mechanism based
14732          * upon subsystem IDs.
14733          */
14734         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14735             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14736             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14737                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14738                 tg3_flag_set(tp, USE_LINKCHG_REG);
14739         }
14740
14741         /* For all SERDES we poll the MAC status register. */
14742         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14743                 tg3_flag_set(tp, POLL_SERDES);
14744         else
14745                 tg3_flag_clear(tp, POLL_SERDES);
14746
14747         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14748         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14749         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14750             tg3_flag(tp, PCIX_MODE)) {
14751                 tp->rx_offset = NET_SKB_PAD;
14752 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14753                 tp->rx_copy_thresh = ~(u16)0;
14754 #endif
14755         }
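        /* The block above drops the usual NET_IP_ALIGN receive offset
         * on the 5701 in PCI-X mode (the chip apparently cannot DMA to
         * the 2-byte-shifted address there); on architectures without
         * efficient unaligned access it also forces rx_copy_thresh to
         * its maximum so every packet is copied into an aligned skb.
         */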
14756
14757         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14758         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14759         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14760
14761         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14762
14763         /* Increment the rx prod index on the rx std ring by at most
14764          * 8 for these chips to work around hw errata.
14765          */
14766         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14767             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14768             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14769                 tp->rx_std_max_post = 8;
14770
14771         if (tg3_flag(tp, ASPM_WORKAROUND))
14772                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14773                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14774
14775         return err;
14776 }
14777
14778 #ifdef CONFIG_SPARC
14779 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14780 {
14781         struct net_device *dev = tp->dev;
14782         struct pci_dev *pdev = tp->pdev;
14783         struct device_node *dp = pci_device_to_OF_node(pdev);
14784         const unsigned char *addr;
14785         int len;
14786
14787         addr = of_get_property(dp, "local-mac-address", &len);
14788         if (addr && len == 6) {
14789                 memcpy(dev->dev_addr, addr, 6);
14790                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14791                 return 0;
14792         }
14793         return -ENODEV;
14794 }
14795
14796 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14797 {
14798         struct net_device *dev = tp->dev;
14799
14800         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14801         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14802         return 0;
14803 }
14804 #endif
14805
14806 static int __devinit tg3_get_device_address(struct tg3 *tp)
14807 {
14808         struct net_device *dev = tp->dev;
14809         u32 hi, lo, mac_offset;
14810         int addr_ok = 0;
14811
14812 #ifdef CONFIG_SPARC
14813         if (!tg3_get_macaddr_sparc(tp))
14814                 return 0;
14815 #endif
14816
14817         mac_offset = 0x7c;
14818         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14819             tg3_flag(tp, 5780_CLASS)) {
14820                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14821                         mac_offset = 0xcc;
14822                 if (tg3_nvram_lock(tp))
14823                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14824                 else
14825                         tg3_nvram_unlock(tp);
14826         } else if (tg3_flag(tp, 5717_PLUS)) {
14827                 if (tp->pci_fn & 1)
14828                         mac_offset = 0xcc;
14829                 if (tp->pci_fn > 1)
14830                         mac_offset += 0x18c;
14831         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14832                 mac_offset = 0x10;
14833
14834         /* First try to get it from MAC address mailbox. */
14835         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14836         if ((hi >> 16) == 0x484b) {
14837                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14838                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14839
14840                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14841                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14842                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14843                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14844                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14845
14846                 /* Some old bootcode may report a 0 MAC address in SRAM */
14847                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14848         }
14849         if (!addr_ok) {
14850                 /* Next, try NVRAM. */
14851                 if (!tg3_flag(tp, NO_NVRAM) &&
14852                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14853                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14854                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14855                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14856                 }
14857                 /* Finally just fetch it out of the MAC control regs. */
14858                 else {
14859                         hi = tr32(MAC_ADDR_0_HIGH);
14860                         lo = tr32(MAC_ADDR_0_LOW);
14861
14862                         dev->dev_addr[5] = lo & 0xff;
14863                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14864                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14865                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14866                         dev->dev_addr[1] = hi & 0xff;
14867                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14868                 }
14869         }
14870
14871         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14872 #ifdef CONFIG_SPARC
14873                 if (!tg3_get_default_macaddr_sparc(tp))
14874                         return 0;
14875 #endif
14876                 return -EINVAL;
14877         }
14878         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14879         return 0;
14880 }
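
/* Address-source fallback order in the function above: the
 * bootcode mailbox in SRAM (tagged 0x484b, ASCII "HK"), then NVRAM
 * at the per-function mac_offset, then the MAC_ADDR_0 registers,
 * and finally -- on SPARC only -- the system IDPROM.  An address
 * that is still invalid after all that fails the probe with
 * -EINVAL.
 */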
14881
14882 #define BOUNDARY_SINGLE_CACHELINE       1
14883 #define BOUNDARY_MULTI_CACHELINE        2
14884
14885 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14886 {
14887         int cacheline_size;
14888         u8 byte;
14889         int goal;
14890
14891         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14892         if (byte == 0)
14893                 cacheline_size = 1024;
14894         else
14895                 cacheline_size = (int) byte * 4;
14896
14897         /* On 5703 and later chips, the boundary bits have no
14898          * effect.
14899          */
14900         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14901             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14902             !tg3_flag(tp, PCI_EXPRESS))
14903                 goto out;
14904
14905 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14906         goal = BOUNDARY_MULTI_CACHELINE;
14907 #else
14908 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14909         goal = BOUNDARY_SINGLE_CACHELINE;
14910 #else
14911         goal = 0;
14912 #endif
14913 #endif
14914
14915         if (tg3_flag(tp, 57765_PLUS)) {
14916                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14917                 goto out;
14918         }
14919
14920         if (!goal)
14921                 goto out;
14922
14923         /* PCI controllers on most RISC systems tend to disconnect
14924          * when a device tries to burst across a cache-line boundary.
14925          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14926          *
14927          * Unfortunately, for PCI-E there are only limited
14928          * write-side controls for this, and thus for reads
14929          * we will still get the disconnects.  We'll also waste
14930          * these PCI cycles for both read and write for chips
14931          * other than 5700 and 5701 which do not implement the
14932          * boundary bits.
14933          */
14934         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14935                 switch (cacheline_size) {
14936                 case 16:
14937                 case 32:
14938                 case 64:
14939                 case 128:
14940                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14941                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14942                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14943                         } else {
14944                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14945                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14946                         }
14947                         break;
14948
14949                 case 256:
14950                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14951                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14952                         break;
14953
14954                 default:
14955                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14956                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14957                         break;
14958                 }
14959         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14960                 switch (cacheline_size) {
14961                 case 16:
14962                 case 32:
14963                 case 64:
14964                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14965                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14966                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14967                                 break;
14968                         }
14969                         /* fallthrough */
14970                 case 128:
14971                 default:
14972                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14973                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14974                         break;
14975                 }
14976         } else {
14977                 switch (cacheline_size) {
14978                 case 16:
14979                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14980                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14981                                         DMA_RWCTRL_WRITE_BNDRY_16);
14982                                 break;
14983                         }
14984                         /* fallthrough */
14985                 case 32:
14986                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14987                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14988                                         DMA_RWCTRL_WRITE_BNDRY_32);
14989                                 break;
14990                         }
14991                         /* fallthrough */
14992                 case 64:
14993                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14994                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14995                                         DMA_RWCTRL_WRITE_BNDRY_64);
14996                                 break;
14997                         }
14998                         /* fallthrough */
14999                 case 128:
15000                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15001                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15002                                         DMA_RWCTRL_WRITE_BNDRY_128);
15003                                 break;
15004                         }
15005                         /* fallthrough */
15006                 case 256:
15007                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15008                                 DMA_RWCTRL_WRITE_BNDRY_256);
15009                         break;
15010                 case 512:
15011                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15012                                 DMA_RWCTRL_WRITE_BNDRY_512);
15013                         break;
15014                 case 1024:
15015                 default:
15016                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15017                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15018                         break;
15019                 }
15020         }
15021
15022 out:
15023         return val;
15024 }
15025
15026 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
                                           dma_addr_t buf_dma, int size,
                                           int to_device)
15027 {
15028         struct tg3_internal_buffer_desc test_desc;
15029         u32 sram_dma_descs;
15030         int i, ret;
15031
15032         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15033
15034         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15035         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15036         tw32(RDMAC_STATUS, 0);
15037         tw32(WDMAC_STATUS, 0);
15038
15039         tw32(BUFMGR_MODE, 0);
15040         tw32(FTQ_RESET, 0);
15041
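              /* Build one internal buffer descriptor: the host DMA address
               * of the test buffer plus the NIC-side mbuf address (0x2100,
               * the same SRAM offset the disabled readback check below
               * uses).
               */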
15042         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15043         test_desc.addr_lo = buf_dma & 0xffffffff;
15044         test_desc.nic_mbuf = 0x00002100;
15045         test_desc.len = size;
15046
15047         /*
15048          * The HP ZX1 was seeing test failures for 5701 cards running
15049          * at 33MHz the *second* time the tg3 driver was loaded after
15050          * an initial scan.
15051          *
15052          * Broadcom tells me:
15053          *   ...the DMA engine is connected to the GRC block and a DMA
15054          *   reset may affect the GRC block in some unpredictable way...
15055          *   The behavior of resets to individual blocks has not been tested.
15056          *
15057          * Broadcom noted the GRC reset will also reset all sub-components.
15058          */
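              /* cqid_sqid presumably packs (completion queue ID << 8) |
               * submission queue ID; the read- and write-DMA directions
               * use different hardware queue numbers.
               */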
15059         if (to_device) {
15060                 test_desc.cqid_sqid = (13 << 8) | 2;
15061
15062                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15063                 udelay(40);
15064         } else {
15065                 test_desc.cqid_sqid = (16 << 8) | 7;
15066
15067                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15068                 udelay(40);
15069         }
15070         test_desc.flags = 0x00000005;
15071
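              /* Copy the descriptor into NIC SRAM one 32-bit word at a
               * time through the PCI config-space memory window (indirect
               * access), then close the window.
               */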
15072         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15073                 u32 val;
15074
15075                 val = *(((u32 *)&test_desc) + i);
15076                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15077                                        sram_dma_descs + (i * sizeof(u32)));
15078                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15079         }
15080         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15081
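              /* Kick off the transfer by enqueuing the descriptor's SRAM
               * address on the appropriate DMA FTQ.
               */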
15082         if (to_device)
15083                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15084         else
15085                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15086
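              /* Poll the matching completion FIFO for up to 4 ms
               * (40 polls at 100 us) until the descriptor address shows
               * up, which signals that the DMA transfer finished.
               */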
15087         ret = -ENODEV;
15088         for (i = 0; i < 40; i++) {
15089                 u32 val;
15090
15091                 if (to_device)
15092                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15093                 else
15094                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15095                 if ((val & 0xffff) == sram_dma_descs) {
15096                         ret = 0;
15097                         break;
15098                 }
15099
15100                 udelay(100);
15101         }
15102
15103         return ret;
15104 }
15105
15106 #define TEST_BUFFER_SIZE        0x2000
15107
15108 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15109         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15110         { },
15111 };
15112
15113 static int __devinit tg3_test_dma(struct tg3 *tp)
15114 {
15115         dma_addr_t buf_dma;
15116         u32 *buf, saved_dma_rwctrl;
15117         int ret = 0;
15118
15119         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15120                                  &buf_dma, GFP_KERNEL);
15121         if (!buf) {
15122                 ret = -ENOMEM;
15123                 goto out_nofree;
15124         }
15125
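              /* 0x7/0x6 are the PCI bus command codes the DMA engine uses
               * for writes and reads; tg3_calc_dma_bndry() below merges in
               * the read/write boundary settings.
               */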
15126         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15127                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15128
15129         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15130
15131         if (tg3_flag(tp, 57765_PLUS))
15132                 goto out;
15133
15134         if (tg3_flag(tp, PCI_EXPRESS)) {
15135                 /* DMA read watermark not used on PCIE */
15136                 tp->dma_rwctrl |= 0x00180000;
15137         } else if (!tg3_flag(tp, PCIX_MODE)) {
15138                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15139                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15140                         tp->dma_rwctrl |= 0x003f0000;
15141                 else
15142                         tp->dma_rwctrl |= 0x003f000f;
15143         } else {
15144                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15145                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15146                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15147                         u32 read_water = 0x7;
15148
15149                         /* If the 5704 is behind the EPB bridge, we can
15150                          * do the less restrictive ONE_DMA workaround for
15151                          * better performance.
15152                          */
15153                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15154                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15155                                 tp->dma_rwctrl |= 0x8000;
15156                         else if (ccval == 0x6 || ccval == 0x7)
15157                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15158
15159                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15160                                 read_water = 4;
15161                         /* Set bit 23 to enable PCIX hw bug fix */
15162                         tp->dma_rwctrl |=
15163                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15164                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15165                                 (1 << 23);
15166                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15167                         /* 5780 always in PCIX mode */
15168                         tp->dma_rwctrl |= 0x00144000;
15169                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15170                         /* 5714 always in PCIX mode */
15171                         tp->dma_rwctrl |= 0x00148000;
15172                 } else {
15173                         tp->dma_rwctrl |= 0x001b000f;
15174                 }
15175         }
15176
15177         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15178             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15179                 tp->dma_rwctrl &= 0xfffffff0;
15180
15181         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15182             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15183                 /* Remove this if it causes problems for some boards. */
15184                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15185
15186                 /* On 5700/5701 chips, we need to set this bit.
15187                  * Otherwise the chip will issue cacheline transactions
15188                  * to streamable DMA memory without all the byte
15189                  * enables turned on.  This is an error on several
15190                  * RISC PCI controllers, in particular sparc64.
15191                  *
15192                  * On 5703/5704 chips, this bit has been reassigned
15193                  * a different meaning.  In particular, it is used
15194                  * on those chips to enable a PCI-X workaround.
15195                  */
15196                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15197         }
15198
15199         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15200
15201 #if 0
15202         /* Unneeded, already done by tg3_get_invariants.  */
15203         tg3_switch_clocks(tp);
15204 #endif
15205
15206         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15207             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15208                 goto out;
15209
15210         /* It is best to perform the DMA test with the maximum write
15211          * burst size to expose the 5700/5701 write DMA bug.
15212          */
15213         saved_dma_rwctrl = tp->dma_rwctrl;
15214         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15215         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15216
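              /* Write a known pattern to the chip, read it back, and
               * compare.  On a mismatch, retry once with the write
               * boundary forced down to 16 bytes; a second mismatch is a
               * hard failure.
               */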
15217         while (1) {
15218                 u32 *p = buf, i;
15219
15220                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15221                         p[i] = i;
15222
15223                 /* Send the buffer to the chip. */
15224                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15225                 if (ret) {
15226                         dev_err(&tp->pdev->dev,
15227                                 "%s: Buffer write failed. err = %d\n",
15228                                 __func__, ret);
15229                         break;
15230                 }
15231
15232 #if 0
15233                 /* validate data reached card RAM correctly. */
15234                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15235                         u32 val;
15236                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15237                         if (le32_to_cpu(val) != p[i]) {
15238                                 dev_err(&tp->pdev->dev,
15239                                         "%s: Buffer corrupted on device! "
15240                                         "(%d != %d)\n", __func__, val, i);
15241                                 /* ret = -ENODEV here? */
15242                         }
15243                         p[i] = 0;
15244                 }
15245 #endif
15246                 /* Now read it back. */
15247                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15248                 if (ret) {
15249                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15250                                 "err = %d\n", __func__, ret);
15251                         break;
15252                 }
15253
15254                 /* Verify it. */
15255                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15256                         if (p[i] == i)
15257                                 continue;
15258
15259                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15260                             DMA_RWCTRL_WRITE_BNDRY_16) {
15261                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15262                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15263                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15264                                 break;
15265                         } else {
15266                                 dev_err(&tp->pdev->dev,
15267                                         "%s: Buffer corrupted on read back! "
15268                                         "(%d != %d)\n", __func__, p[i], i);
15269                                 ret = -ENODEV;
15270                                 goto out;
15271                         }
15272                 }
15273
15274                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15275                         /* Success. */
15276                         ret = 0;
15277                         break;
15278                 }
15279         }
15280         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15281             DMA_RWCTRL_WRITE_BNDRY_16) {
15282                 /* The DMA test passed without adjusting the DMA boundary;
15283                  * now look for chipsets that are known to expose the
15284                  * DMA bug without failing the test.
15285                  */
15286                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15287                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15288                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15289                 } else {
15290                         /* Safe to use the calculated DMA boundary. */
15291                         tp->dma_rwctrl = saved_dma_rwctrl;
15292                 }
15293
15294                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15295         }
15296
15297 out:
15298         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15299 out_nofree:
15300         return ret;
15301 }
15302
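      /* Pick buffer manager MBUF watermark defaults by chip family; the
       * _jumbo variants cover jumbo-frame operation.
       */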
15303 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15304 {
15305         if (tg3_flag(tp, 57765_PLUS)) {
15306                 tp->bufmgr_config.mbuf_read_dma_low_water =
15307                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15308                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15309                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15310                 tp->bufmgr_config.mbuf_high_water =
15311                         DEFAULT_MB_HIGH_WATER_57765;
15312
15313                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15314                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15315                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15316                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15317                 tp->bufmgr_config.mbuf_high_water_jumbo =
15318                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15319         } else if (tg3_flag(tp, 5705_PLUS)) {
15320                 tp->bufmgr_config.mbuf_read_dma_low_water =
15321                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15322                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15323                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15324                 tp->bufmgr_config.mbuf_high_water =
15325                         DEFAULT_MB_HIGH_WATER_5705;
15326                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15327                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15328                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15329                         tp->bufmgr_config.mbuf_high_water =
15330                                 DEFAULT_MB_HIGH_WATER_5906;
15331                 }
15332
15333                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15334                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15335                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15336                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15337                 tp->bufmgr_config.mbuf_high_water_jumbo =
15338                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15339         } else {
15340                 tp->bufmgr_config.mbuf_read_dma_low_water =
15341                         DEFAULT_MB_RDMA_LOW_WATER;
15342                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15343                         DEFAULT_MB_MACRX_LOW_WATER;
15344                 tp->bufmgr_config.mbuf_high_water =
15345                         DEFAULT_MB_HIGH_WATER;
15346
15347                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15348                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15349                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15350                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15351                 tp->bufmgr_config.mbuf_high_water_jumbo =
15352                         DEFAULT_MB_HIGH_WATER_JUMBO;
15353         }
15354
15355         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15356         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15357 }
15358
15359 static char * __devinit tg3_phy_string(struct tg3 *tp)
15360 {
15361         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15362         case TG3_PHY_ID_BCM5400:        return "5400";
15363         case TG3_PHY_ID_BCM5401:        return "5401";
15364         case TG3_PHY_ID_BCM5411:        return "5411";
15365         case TG3_PHY_ID_BCM5701:        return "5701";
15366         case TG3_PHY_ID_BCM5703:        return "5703";
15367         case TG3_PHY_ID_BCM5704:        return "5704";
15368         case TG3_PHY_ID_BCM5705:        return "5705";
15369         case TG3_PHY_ID_BCM5750:        return "5750";
15370         case TG3_PHY_ID_BCM5752:        return "5752";
15371         case TG3_PHY_ID_BCM5714:        return "5714";
15372         case TG3_PHY_ID_BCM5780:        return "5780";
15373         case TG3_PHY_ID_BCM5755:        return "5755";
15374         case TG3_PHY_ID_BCM5787:        return "5787";
15375         case TG3_PHY_ID_BCM5784:        return "5784";
15376         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15377         case TG3_PHY_ID_BCM5906:        return "5906";
15378         case TG3_PHY_ID_BCM5761:        return "5761";
15379         case TG3_PHY_ID_BCM5718C:       return "5718C";
15380         case TG3_PHY_ID_BCM5718S:       return "5718S";
15381         case TG3_PHY_ID_BCM57765:       return "57765";
15382         case TG3_PHY_ID_BCM5719C:       return "5719C";
15383         case TG3_PHY_ID_BCM5720C:       return "5720C";
15384         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15385         case 0:                 return "serdes";
15386         default:                return "unknown";
15387         }
15388 }
15389
15390 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15391 {
15392         if (tg3_flag(tp, PCI_EXPRESS)) {
15393                 strcpy(str, "PCI Express");
15394                 return str;
15395         } else if (tg3_flag(tp, PCIX_MODE)) {
15396                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15397
15398                 strcpy(str, "PCIX:");
15399
15400                 if ((clock_ctrl == 7) ||
15401                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15402                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15403                         strcat(str, "133MHz");
15404                 else if (clock_ctrl == 0)
15405                         strcat(str, "33MHz");
15406                 else if (clock_ctrl == 2)
15407                         strcat(str, "50MHz");
15408                 else if (clock_ctrl == 4)
15409                         strcat(str, "66MHz");
15410                 else if (clock_ctrl == 6)
15411                         strcat(str, "100MHz");
15412         } else {
15413                 strcpy(str, "PCI:");
15414                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15415                         strcat(str, "66MHz");
15416                 else
15417                         strcat(str, "33MHz");
15418         }
15419         if (tg3_flag(tp, PCI_32BIT))
15420                 strcat(str, ":32-bit");
15421         else
15422                 strcat(str, ":64-bit");
15423         return str;
15424 }
15425
15426 static void __devinit tg3_init_coal(struct tg3 *tp)
15427 {
15428         struct ethtool_coalesce *ec = &tp->coal;
15429
15430         memset(ec, 0, sizeof(*ec));
15431         ec->cmd = ETHTOOL_GCOALESCE;
15432         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15433         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15434         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15435         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15436         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15437         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15438         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15439         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15440         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15441
15442         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15443                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15444                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15445                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15446                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15447                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15448         }
15449
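              /* 5705 and newer parts do not implement the per-IRQ and
               * statistics-block coalescing parameters, so report them
               * as zero.
               */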
15450         if (tg3_flag(tp, 5705_PLUS)) {
15451                 ec->rx_coalesce_usecs_irq = 0;
15452                 ec->tx_coalesce_usecs_irq = 0;
15453                 ec->stats_block_coalesce_usecs = 0;
15454         }
15455 }
15456
15457 static int __devinit tg3_init_one(struct pci_dev *pdev,
15458                                   const struct pci_device_id *ent)
15459 {
15460         struct net_device *dev;
15461         struct tg3 *tp;
15462         int i, err, pm_cap;
15463         u32 sndmbx, rcvmbx, intmbx;
15464         char str[40];
15465         u64 dma_mask, persist_dma_mask;
15466         netdev_features_t features = 0;
15467
15468         printk_once(KERN_INFO "%s\n", version);
15469
15470         err = pci_enable_device(pdev);
15471         if (err) {
15472                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15473                 return err;
15474         }
15475
15476         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15477         if (err) {
15478                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15479                 goto err_out_disable_pdev;
15480         }
15481
15482         pci_set_master(pdev);
15483
15484         /* Find power-management capability. */
15485         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15486         if (pm_cap == 0) {
15487                 dev_err(&pdev->dev,
15488                         "Cannot find Power Management capability, aborting\n");
15489                 err = -EIO;
15490                 goto err_out_free_res;
15491         }
15492
15493         err = pci_set_power_state(pdev, PCI_D0);
15494         if (err) {
15495                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15496                 goto err_out_free_res;
15497         }
15498
15499         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15500         if (!dev) {
15501                 err = -ENOMEM;
15502                 goto err_out_power_down;
15503         }
15504
15505         SET_NETDEV_DEV(dev, &pdev->dev);
15506
15507         tp = netdev_priv(dev);
15508         tp->pdev = pdev;
15509         tp->dev = dev;
15510         tp->pm_cap = pm_cap;
15511         tp->rx_mode = TG3_DEF_RX_MODE;
15512         tp->tx_mode = TG3_DEF_TX_MODE;
15513
15514         if (tg3_debug > 0)
15515                 tp->msg_enable = tg3_debug;
15516         else
15517                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15518
15519         /* The word/byte swap controls here govern register access byte
15520          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15521          * setting below.
15522          */
15523         tp->misc_host_ctrl =
15524                 MISC_HOST_CTRL_MASK_PCI_INT |
15525                 MISC_HOST_CTRL_WORD_SWAP |
15526                 MISC_HOST_CTRL_INDIR_ACCESS |
15527                 MISC_HOST_CTRL_PCISTATE_RW;
15528
15529         /* The NONFRM (non-frame) byte/word swap controls take effect
15530          * on descriptor entries, anything which isn't packet data.
15531          *
15532          * The StrongARM chips on the board (one for tx, one for rx)
15533          * are running in big-endian mode.
15534          */
15535         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15536                         GRC_MODE_WSWAP_NONFRM_DATA);
15537 #ifdef __BIG_ENDIAN
15538         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15539 #endif
15540         spin_lock_init(&tp->lock);
15541         spin_lock_init(&tp->indirect_lock);
15542         INIT_WORK(&tp->reset_task, tg3_reset_task);
15543
15544         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15545         if (!tp->regs) {
15546                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15547                 err = -ENOMEM;
15548                 goto err_out_free_dev;
15549         }
15550
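              /* These devices carry an Application Processing Engine (APE)
               * for management traffic; its registers sit behind BAR 2.
               */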
15551         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15552             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15553             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15554             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15555             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15556             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15557             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15558             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15559                 tg3_flag_set(tp, ENABLE_APE);
15560                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15561                 if (!tp->aperegs) {
15562                         dev_err(&pdev->dev,
15563                                 "Cannot map APE registers, aborting\n");
15564                         err = -ENOMEM;
15565                         goto err_out_iounmap;
15566                 }
15567         }
15568
15569         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15570         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15571
15572         dev->ethtool_ops = &tg3_ethtool_ops;
15573         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15574         dev->netdev_ops = &tg3_netdev_ops;
15575         dev->irq = pdev->irq;
15576
15577         err = tg3_get_invariants(tp);
15578         if (err) {
15579                 dev_err(&pdev->dev,
15580                         "Problem fetching invariants of chip, aborting\n");
15581                 goto err_out_apeunmap;
15582         }
15583
15584         /* The EPB bridge inside the 5714, 5715, and 5780, and any
15585          * device behind the EPB, cannot support DMA addresses > 40-bit.
15586          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15587          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15588          * do DMA address check in tg3_start_xmit().
15589          */
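              /* 5788 parts are limited to 32-bit DMA addressing. */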
15590         if (tg3_flag(tp, IS_5788))
15591                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15592         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15593                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15594 #ifdef CONFIG_HIGHMEM
15595                 dma_mask = DMA_BIT_MASK(64);
15596 #endif
15597         } else
15598                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15599
15600         /* Configure DMA attributes. */
15601         if (dma_mask > DMA_BIT_MASK(32)) {
15602                 err = pci_set_dma_mask(pdev, dma_mask);
15603                 if (!err) {
15604                         features |= NETIF_F_HIGHDMA;
15605                         err = pci_set_consistent_dma_mask(pdev,
15606                                                           persist_dma_mask);
15607                         if (err < 0) {
15608                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15609                                         "DMA for consistent allocations\n");
15610                                 goto err_out_apeunmap;
15611                         }
15612                 }
15613         }
15614         if (err || dma_mask == DMA_BIT_MASK(32)) {
15615                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15616                 if (err) {
15617                         dev_err(&pdev->dev,
15618                                 "No usable DMA configuration, aborting\n");
15619                         goto err_out_apeunmap;
15620                 }
15621         }
15622
15623         tg3_init_bufmgr_config(tp);
15624
15625         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15626
15627         /* 5700 B0 chips do not support checksumming correctly due
15628          * to hardware bugs.
15629          */
15630         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15631                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15632
15633                 if (tg3_flag(tp, 5755_PLUS))
15634                         features |= NETIF_F_IPV6_CSUM;
15635         }
15636
15637         /* TSO is on by default on chips that support hardware TSO.
15638          * Firmware TSO on older chips gives lower performance, so it
15639          * is off by default, but can be enabled using ethtool.
15640          */
15641         if ((tg3_flag(tp, HW_TSO_1) ||
15642              tg3_flag(tp, HW_TSO_2) ||
15643              tg3_flag(tp, HW_TSO_3)) &&
15644             (features & NETIF_F_IP_CSUM))
15645                 features |= NETIF_F_TSO;
15646         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15647                 if (features & NETIF_F_IPV6_CSUM)
15648                         features |= NETIF_F_TSO6;
15649                 if (tg3_flag(tp, HW_TSO_3) ||
15650                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15651                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15652                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15653                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15654                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15655                         features |= NETIF_F_TSO_ECN;
15656         }
15657
15658         dev->features |= features;
15659         dev->vlan_features |= features;
15660
15661         /*
15662          * Add loopback capability only for a subset of devices that support
15663          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15664          * loopback for the remaining devices.
15665          */
15666         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15667             !tg3_flag(tp, CPMU_PRESENT))
15668                 /* Add the loopback capability */
15669                 features |= NETIF_F_LOOPBACK;
15670
15671         dev->hw_features |= features;
15672
15673         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15674             !tg3_flag(tp, TSO_CAPABLE) &&
15675             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15676                 tg3_flag_set(tp, MAX_RXPEND_64);
15677                 tp->rx_pending = 63;
15678         }
15679
15680         err = tg3_get_device_address(tp);
15681         if (err) {
15682                 dev_err(&pdev->dev,
15683                         "Could not obtain valid ethernet address, aborting\n");
15684                 goto err_out_apeunmap;
15685         }
15686
15687         /*
15688          * Reset the chip in case the UNDI or EFI driver did not shut it
15689          * down.  Otherwise the DMA self test will enable WDMAC and we'll
15690          * see (spurious) pending DMA on the PCI bus at that point.
15691          */
15692         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15693             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15694                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15695                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15696         }
15697
15698         err = tg3_test_dma(tp);
15699         if (err) {
15700                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15701                 goto err_out_apeunmap;
15702         }
15703
15704         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15705         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15706         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15707         for (i = 0; i < tp->irq_max; i++) {
15708                 struct tg3_napi *tnapi = &tp->napi[i];
15709
15710                 tnapi->tp = tp;
15711                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15712
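                      /* Interrupt mailbox addresses advance by 8 bytes for
                       * the first five steps and by 4 bytes thereafter.
                       */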
15713                 tnapi->int_mbox = intmbx;
15714                 if (i <= 4)
15715                         intmbx += 0x8;
15716                 else
15717                         intmbx += 0x4;
15718
15719                 tnapi->consmbox = rcvmbx;
15720                 tnapi->prodmbox = sndmbx;
15721
15722                 if (i)
15723                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15724                 else
15725                         tnapi->coal_now = HOSTCC_MODE_NOW;
15726
15727                 if (!tg3_flag(tp, SUPPORT_MSIX))
15728                         break;
15729
15730                 /*
15731                  * If we support MSIX, we'll be using RSS.  If we're using
15732                  * RSS, the first vector only handles link interrupts and the
15733                  * remaining vectors handle rx and tx interrupts.  Reuse the
15734                  * mailbox values for the next iteration.  The values we set up
15735                  * above are still useful for the single-vector mode.
15736                  */
15737                 if (!i)
15738                         continue;
15739
15740                 rcvmbx += 0x8;
15741
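                      /* Walk sndmbx through both 32-bit halves of each
                       * 64-bit send producer mailbox slot, packing two
                       * vectors per 8-byte register.
                       */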
15742                 if (sndmbx & 0x4)
15743                         sndmbx -= 0x4;
15744                 else
15745                         sndmbx += 0xc;
15746         }
15747
15748         tg3_init_coal(tp);
15749
15750         pci_set_drvdata(pdev, dev);
15751
15752         if (tg3_flag(tp, 5717_PLUS)) {
15753                 /* Resume a low-power mode */
15754                 tg3_frob_aux_power(tp, false);
15755         }
15756
15757         tg3_timer_init(tp);
15758
15759         err = register_netdev(dev);
15760         if (err) {
15761                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15762                 goto err_out_apeunmap;
15763         }
15764
15765         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15766                     tp->board_part_number,
15767                     tp->pci_chip_rev_id,
15768                     tg3_bus_string(tp, str),
15769                     dev->dev_addr);
15770
15771         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15772                 struct phy_device *phydev;
15773                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15774                 netdev_info(dev,
15775                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15776                             phydev->drv->name, dev_name(&phydev->dev));
15777         } else {
15778                 char *ethtype;
15779
15780                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15781                         ethtype = "10/100Base-TX";
15782                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15783                         ethtype = "1000Base-SX";
15784                 else
15785                         ethtype = "10/100/1000Base-T";
15786
15787                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15788                             "(WireSpeed[%d], EEE[%d])\n",
15789                             tg3_phy_string(tp), ethtype,
15790                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15791                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15792         }
15793
15794         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15795                     (dev->features & NETIF_F_RXCSUM) != 0,
15796                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15797                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15798                     tg3_flag(tp, ENABLE_ASF) != 0,
15799                     tg3_flag(tp, TSO_CAPABLE) != 0);
15800         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15801                     tp->dma_rwctrl,
15802                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15803                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15804
15805         pci_save_state(pdev);
15806
15807         return 0;
15808
15809 err_out_apeunmap:
15810         if (tp->aperegs) {
15811                 iounmap(tp->aperegs);
15812                 tp->aperegs = NULL;
15813         }
15814
15815 err_out_iounmap:
15816         if (tp->regs) {
15817                 iounmap(tp->regs);
15818                 tp->regs = NULL;
15819         }
15820
15821 err_out_free_dev:
15822         free_netdev(dev);
15823
15824 err_out_power_down:
15825         pci_set_power_state(pdev, PCI_D3hot);
15826
15827 err_out_free_res:
15828         pci_release_regions(pdev);
15829
15830 err_out_disable_pdev:
15831         pci_disable_device(pdev);
15832         pci_set_drvdata(pdev, NULL);
15833         return err;
15834 }
15835
15836 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15837 {
15838         struct net_device *dev = pci_get_drvdata(pdev);
15839
15840         if (dev) {
15841                 struct tg3 *tp = netdev_priv(dev);
15842
15843                 if (tp->fw)
15844                         release_firmware(tp->fw);
15845
15846                 tg3_reset_task_cancel(tp);
15847
15848                 if (tg3_flag(tp, USE_PHYLIB)) {
15849                         tg3_phy_fini(tp);
15850                         tg3_mdio_fini(tp);
15851                 }
15852
15853                 unregister_netdev(dev);
15854                 if (tp->aperegs) {
15855                         iounmap(tp->aperegs);
15856                         tp->aperegs = NULL;
15857                 }
15858                 if (tp->regs) {
15859                         iounmap(tp->regs);
15860                         tp->regs = NULL;
15861                 }
15862                 free_netdev(dev);
15863                 pci_release_regions(pdev);
15864                 pci_disable_device(pdev);
15865                 pci_set_drvdata(pdev, NULL);
15866         }
15867 }
15868
15869 #ifdef CONFIG_PM_SLEEP
15870 static int tg3_suspend(struct device *device)
15871 {
15872         struct pci_dev *pdev = to_pci_dev(device);
15873         struct net_device *dev = pci_get_drvdata(pdev);
15874         struct tg3 *tp = netdev_priv(dev);
15875         int err;
15876
15877         if (!netif_running(dev))
15878                 return 0;
15879
15880         tg3_reset_task_cancel(tp);
15881         tg3_phy_stop(tp);
15882         tg3_netif_stop(tp);
15883
15884         tg3_timer_stop(tp);
15885
15886         tg3_full_lock(tp, 1);
15887         tg3_disable_ints(tp);
15888         tg3_full_unlock(tp);
15889
15890         netif_device_detach(dev);
15891
15892         tg3_full_lock(tp, 0);
15893         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15894         tg3_flag_clear(tp, INIT_COMPLETE);
15895         tg3_full_unlock(tp);
15896
15897         err = tg3_power_down_prepare(tp);
15898         if (err) {
15899                 int err2;
15900
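                      /* Power-down preparation failed; restart the
                       * hardware and reattach the interface so the device
                       * stays usable.
                       */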
15901                 tg3_full_lock(tp, 0);
15902
15903                 tg3_flag_set(tp, INIT_COMPLETE);
15904                 err2 = tg3_restart_hw(tp, 1);
15905                 if (err2)
15906                         goto out;
15907
15908                 tg3_timer_start(tp);
15909
15910                 netif_device_attach(dev);
15911                 tg3_netif_start(tp);
15912
15913 out:
15914                 tg3_full_unlock(tp);
15915
15916                 if (!err2)
15917                         tg3_phy_start(tp);
15918         }
15919
15920         return err;
15921 }
15922
15923 static int tg3_resume(struct device *device)
15924 {
15925         struct pci_dev *pdev = to_pci_dev(device);
15926         struct net_device *dev = pci_get_drvdata(pdev);
15927         struct tg3 *tp = netdev_priv(dev);
15928         int err;
15929
15930         if (!netif_running(dev))
15931                 return 0;
15932
15933         netif_device_attach(dev);
15934
15935         tg3_full_lock(tp, 0);
15936
15937         tg3_flag_set(tp, INIT_COMPLETE);
15938         err = tg3_restart_hw(tp, 1);
15939         if (err)
15940                 goto out;
15941
15942         tg3_timer_start(tp);
15943
15944         tg3_netif_start(tp);
15945
15946 out:
15947         tg3_full_unlock(tp);
15948
15949         if (!err)
15950                 tg3_phy_start(tp);
15951
15952         return err;
15953 }
15954
15955 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15956 #define TG3_PM_OPS (&tg3_pm_ops)
15957
15958 #else
15959
15960 #define TG3_PM_OPS NULL
15961
15962 #endif /* CONFIG_PM_SLEEP */
15963
15964 /**
15965  * tg3_io_error_detected - called when PCI error is detected
15966  * @pdev: Pointer to PCI device
15967  * @state: The current pci connection state
15968  *
15969  * This function is called after a PCI bus error affecting
15970  * this device has been detected.
15971  */
15972 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15973                                               pci_channel_state_t state)
15974 {
15975         struct net_device *netdev = pci_get_drvdata(pdev);
15976         struct tg3 *tp = netdev_priv(netdev);
15977         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15978
15979         netdev_info(netdev, "PCI I/O error detected\n");
15980
15981         rtnl_lock();
15982
15983         if (!netif_running(netdev))
15984                 goto done;
15985
15986         tg3_phy_stop(tp);
15987
15988         tg3_netif_stop(tp);
15989
15990         tg3_timer_stop(tp);
15991
15992         /* Want to make sure that the reset task doesn't run */
15993         tg3_reset_task_cancel(tp);
15994
15995         netif_device_detach(netdev);
15996
15997         /* Clean up software state, even if MMIO is blocked */
15998         tg3_full_lock(tp, 0);
15999         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16000         tg3_full_unlock(tp);
16001
16002 done:
16003         if (state == pci_channel_io_perm_failure)
16004                 err = PCI_ERS_RESULT_DISCONNECT;
16005         else
16006                 pci_disable_device(pdev);
16007
16008         rtnl_unlock();
16009
16010         return err;
16011 }
16012
16013 /**
16014  * tg3_io_slot_reset - called after the pci bus has been reset.
16015  * @pdev: Pointer to PCI device
16016  *
16017  * Restart the card from scratch, as if from a cold-boot.
16018  * At this point, the card has experienced a hard reset,
16019  * followed by fixups by BIOS, and has its config space
16020  * set up identically to what it was at cold boot.
16021  */
16022 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16023 {
16024         struct net_device *netdev = pci_get_drvdata(pdev);
16025         struct tg3 *tp = netdev_priv(netdev);
16026         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16027         int err;
16028
16029         rtnl_lock();
16030
16031         if (pci_enable_device(pdev)) {
16032                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16033                 goto done;
16034         }
16035
16036         pci_set_master(pdev);
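              /* Save state again right away so a valid saved copy remains
               * available for any later suspend or error-recovery pass.
               */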
16037         pci_restore_state(pdev);
16038         pci_save_state(pdev);
16039
16040         if (!netif_running(netdev)) {
16041                 rc = PCI_ERS_RESULT_RECOVERED;
16042                 goto done;
16043         }
16044
16045         err = tg3_power_up(tp);
16046         if (err)
16047                 goto done;
16048
16049         rc = PCI_ERS_RESULT_RECOVERED;
16050
16051 done:
16052         rtnl_unlock();
16053
16054         return rc;
16055 }
16056
16057 /**
16058  * tg3_io_resume - called when traffic can start flowing again.
16059  * @pdev: Pointer to PCI device
16060  *
16061  * This callback is called when the error recovery driver tells
16062  * us that it's OK to resume normal operation.
16063  */
16064 static void tg3_io_resume(struct pci_dev *pdev)
16065 {
16066         struct net_device *netdev = pci_get_drvdata(pdev);
16067         struct tg3 *tp = netdev_priv(netdev);
16068         int err;
16069
16070         rtnl_lock();
16071
16072         if (!netif_running(netdev))
16073                 goto done;
16074
16075         tg3_full_lock(tp, 0);
16076         tg3_flag_set(tp, INIT_COMPLETE);
16077         err = tg3_restart_hw(tp, 1);
16078         tg3_full_unlock(tp);
16079         if (err) {
16080                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16081                 goto done;
16082         }
16083
16084         netif_device_attach(netdev);
16085
16086         tg3_timer_start(tp);
16087
16088         tg3_netif_start(tp);
16089
16090         tg3_phy_start(tp);
16091
16092 done:
16093         rtnl_unlock();
16094 }
16095
16096 static struct pci_error_handlers tg3_err_handler = {
16097         .error_detected = tg3_io_error_detected,
16098         .slot_reset     = tg3_io_slot_reset,
16099         .resume         = tg3_io_resume
16100 };
16101
16102 static struct pci_driver tg3_driver = {
16103         .name           = DRV_MODULE_NAME,
16104         .id_table       = tg3_pci_tbl,
16105         .probe          = tg3_init_one,
16106         .remove         = __devexit_p(tg3_remove_one),
16107         .err_handler    = &tg3_err_handler,
16108         .driver.pm      = TG3_PM_OPS,
16109 };
16110
16111 static int __init tg3_init(void)
16112 {
16113         return pci_register_driver(&tg3_driver);
16114 }
16115
16116 static void __exit tg3_cleanup(void)
16117 {
16118         pci_unregister_driver(&tg3_driver);
16119 }
16120
16121 module_init(tg3_init);
16122 module_exit(tg3_cleanup);