]> git.kernelconcepts.de Git - karo-tx-linux.git/blob - drivers/net/ethernet/broadcom/tg3.c
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net...
[karo-tx-linux.git] / drivers / net / ethernet / broadcom / tg3.c
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
41 #include <linux/ip.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47
48 #include <net/checksum.h>
49 #include <net/ip.h>
50
51 #include <asm/system.h>
52 #include <linux/io.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
55
56 #ifdef CONFIG_SPARC
57 #include <asm/idprom.h>
58 #include <asm/prom.h>
59 #endif
60
61 #define BAR_0   0
62 #define BAR_2   2
63
64 #include "tg3.h"
65
66 /* Functions & macros to verify TG3_FLAGS types */
67
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 {
70         return test_bit(flag, bits);
71 }
72
/* Set @flag in the tg3 flag bitmap @bits (atomic set_bit bitop). */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
77
/* Clear @flag in the tg3 flag bitmap @bits (atomic clear_bit bitop). */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
82
83 #define tg3_flag(tp, flag)                              \
84         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag)                          \
86         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag)                        \
88         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89
90 #define DRV_MODULE_NAME         "tg3"
91 #define TG3_MAJ_NUM                     3
92 #define TG3_MIN_NUM                     122
93 #define DRV_MODULE_VERSION      \
94         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE      "December 7, 2011"
96
97 #define RESET_KIND_SHUTDOWN     0
98 #define RESET_KIND_INIT         1
99 #define RESET_KIND_SUSPEND      2
100
101 #define TG3_DEF_RX_MODE         0
102 #define TG3_DEF_TX_MODE         0
103 #define TG3_DEF_MSG_ENABLE        \
104         (NETIF_MSG_DRV          | \
105          NETIF_MSG_PROBE        | \
106          NETIF_MSG_LINK         | \
107          NETIF_MSG_TIMER        | \
108          NETIF_MSG_IFDOWN       | \
109          NETIF_MSG_IFUP         | \
110          NETIF_MSG_RX_ERR       | \
111          NETIF_MSG_TX_ERR)
112
113 #define TG3_GRC_LCLCTL_PWRSW_DELAY      100
114
115 /* length of time before we decide the hardware is borked,
116  * and dev->tx_timeout() should be called to fix the problem
117  */
118
119 #define TG3_TX_TIMEOUT                  (5 * HZ)
120
121 /* hardware minimum and maximum for a single frame's data payload */
122 #define TG3_MIN_MTU                     60
123 #define TG3_MAX_MTU(tp) \
124         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
125
126 /* These numbers seem to be hard coded in the NIC firmware somehow.
127  * You can't change the ring sizes, but you can change where you place
128  * them in the NIC onboard memory.
129  */
130 #define TG3_RX_STD_RING_SIZE(tp) \
131         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
133 #define TG3_DEF_RX_RING_PENDING         200
134 #define TG3_RX_JMB_RING_SIZE(tp) \
135         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
137 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
138
139 /* Do not place this n-ring entries value into the tp struct itself,
140  * we really want to expose these constants to GCC so that modulo et
141  * al.  operations are done with shifts and masks instead of with
142  * hw multiply/modulo instructions.  Another solution would be to
143  * replace things like '% foo' with '& (foo - 1)'.
144  */
145
146 #define TG3_TX_RING_SIZE                512
147 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
148
149 #define TG3_RX_STD_RING_BYTES(tp) \
150         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
151 #define TG3_RX_JMB_RING_BYTES(tp) \
152         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
153 #define TG3_RX_RCB_RING_BYTES(tp) \
154         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
155 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
156                                  TG3_TX_RING_SIZE)
157 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
158
159 #define TG3_DMA_BYTE_ENAB               64
160
161 #define TG3_RX_STD_DMA_SZ               1536
162 #define TG3_RX_JMB_DMA_SZ               9046
163
164 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
165
166 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
167 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
168
169 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
170         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
171
172 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
173         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
174
175 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
176  * that are at least dword aligned when used in PCIX mode.  The driver
177  * works around this bug by double copying the packet.  This workaround
178  * is built into the normal double copy length check for efficiency.
179  *
180  * However, the double copy is only necessary on those architectures
181  * where unaligned memory accesses are inefficient.  For those architectures
182  * where unaligned memory accesses incur little penalty, we can reintegrate
183  * the 5701 in the normal rx path.  Doing so saves a device structure
184  * dereference by hardcoding the double copy threshold in place.
185  */
186 #define TG3_RX_COPY_THRESHOLD           256
187 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
188         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
189 #else
190         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
191 #endif
192
193 #if (NET_IP_ALIGN != 0)
194 #define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
195 #else
196 #define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
197 #endif
198
199 /* minimum number of free TX descriptors required to wake up TX process */
200 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
201 #define TG3_TX_BD_DMA_MAX_2K            2048
202 #define TG3_TX_BD_DMA_MAX_4K            4096
203
204 #define TG3_RAW_IP_ALIGN 2
205
206 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
207
208 #define FIRMWARE_TG3            "tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
211
/* Probe-time banner: driver name, version and release date. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

/* Bitmap of NETIF_MSG_* debug categories, settable at module load. */
static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
226
/* PCI vendor/device IDs claimed by this driver.  The list is terminated
 * by the empty sentinel entry, as required by MODULE_DEVICE_TABLE.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};
311
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
313
/* Statistic names reported via ethtool -S.  The index order is significant:
 * it must match the order in which the driver fills the statistics buffer
 * (TG3_NUM_STATS is derived from this array) -- do not reorder entries.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	/* Receive-side MAC counters */
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	/* Transmit-side MAC counters */
	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	/* DMA write engine counters */
	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	/* DMA read engine counters */
	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	/* Host interface / interrupt counters */
	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
396
397 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
398
399
/* Self-test names reported via ethtool.  Index order is significant: it
 * must match the order in which the driver runs the tests and fills the
 * result array (TG3_NUM_TEST is derived from this array).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test        (online) " },
	{ "link test         (online) " },
	{ "register test     (offline)" },
	{ "memory test       (offline)" },
	{ "mac loopback test (offline)" },
	{ "phy loopback test (offline)" },
	{ "ext loopback test (offline)" },
	{ "interrupt test    (offline)" },
};
412
413 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
414
415
416 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
417 {
418         writel(val, tp->regs + off);
419 }
420
421 static u32 tg3_read32(struct tg3 *tp, u32 off)
422 {
423         return readl(tp->regs + off);
424 }
425
426 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
427 {
428         writel(val, tp->aperegs + off);
429 }
430
431 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
432 {
433         return readl(tp->aperegs + off);
434 }
435
/* Write @val to register @off through the PCI config-space indirect window
 * (base-address register then data register).  indirect_lock serializes
 * the two-step sequence against concurrent indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
445
/* Write @val to register @off and read it straight back so the posted
 * write is flushed to the device before we return.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
451
/* Read register @off through the PCI config-space indirect window; the
 * base-address write and data read are serialized by indirect_lock.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
463
/* Write mailbox register @off via PCI config space when direct MMIO
 * mailbox access is not usable.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* The receive-return and standard-ring producer mailboxes have
	 * dedicated config-space shadow registers and bypass the window.
	 */
	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	/* All other mailboxes go through the indirect window; they sit at
	 * offset 0x5600 in register space (NOTE(review): presumably the
	 * GRC mailbox base -- confirm against tg3.h).
	 */
	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
493
/* Read mailbox register @off through the config-space indirect window
 * (mailboxes sit at offset 0x5600 in register space, matching the write
 * path in tg3_write_indirect_mbox).
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
505
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507  * where it is unsafe to read back the register without some delay.
508  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
510  */
511 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
512 {
513         if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
514                 /* Non-posted methods */
515                 tp->write32(tp, off, val);
516         else {
517                 /* Posted method */
518                 tg3_write32(tp, off, val);
519                 if (usec_wait)
520                         udelay(usec_wait);
521                 tp->read32(tp, off);
522         }
523         /* Wait again after the read for the posted method to guarantee that
524          * the wait time is met.
525          */
526         if (usec_wait)
527                 udelay(usec_wait);
528 }
529
/* Write a mailbox register, reading it back to flush the posted write
 * unless mailbox write reordering is flagged or the ICH workaround (which
 * uses a non-posted write path) is active.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
536
/* Post @val to a TX mailbox.  Chips with the TXD mailbox hardware bug need
 * the value written twice; chips that may reorder mailbox writes need a
 * read-back to flush the write to the device.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}
546
547 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
548 {
549         return readl(tp->regs + off + GRCMBOX_BASE);
550 }
551
552 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
553 {
554         writel(val, tp->regs + off + GRCMBOX_BASE);
555 }
556
557 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
562
563 #define tw32(reg, val)                  tp->write32(tp, reg, val)
564 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg)                       tp->read32(tp, reg)
567
/* Write @val into NIC on-chip SRAM at offset @off through the shared
 * memory window.  indirect_lock serializes window use, and the window
 * base is always restored to zero afterwards.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* 5906: writes to SRAM between the stats block and the TX buffer
	 * descriptors are silently dropped (NOTE(review): presumably that
	 * region is not usable on this chip -- confirm).
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Window accessed via PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window accessed via flushed MMIO writes. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
592
/* Read NIC on-chip SRAM at offset @off into *@val through the shared
 * memory window.  Mirrors tg3_write_mem: indirect_lock serializes window
 * use and the window base is restored to zero afterwards.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	/* 5906: the region between the stats block and the TX buffer
	 * descriptors is not accessed; report zero instead.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		/* Window accessed via PCI config space. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		/* Window accessed via flushed MMIO write, then data read. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
619
620 static void tg3_ape_lock_init(struct tg3 *tp)
621 {
622         int i;
623         u32 regbase, bit;
624
625         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626                 regbase = TG3_APE_LOCK_GRANT;
627         else
628                 regbase = TG3_APE_PER_LOCK_GRANT;
629
630         /* Make sure the driver hasn't any stale locks. */
631         for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
632                 switch (i) {
633                 case TG3_APE_LOCK_PHY0:
634                 case TG3_APE_LOCK_PHY1:
635                 case TG3_APE_LOCK_PHY2:
636                 case TG3_APE_LOCK_PHY3:
637                         bit = APE_LOCK_GRANT_DRIVER;
638                         break;
639                 default:
640                         if (!tp->pci_fn)
641                                 bit = APE_LOCK_GRANT_DRIVER;
642                         else
643                                 bit = 1 << tp->pci_fn;
644                 }
645                 tg3_ape_write32(tp, regbase + 4 * i, bit);
646         }
647
648 }
649
650 static int tg3_ape_lock(struct tg3 *tp, int locknum)
651 {
652         int i, off;
653         int ret = 0;
654         u32 status, req, gnt, bit;
655
656         if (!tg3_flag(tp, ENABLE_APE))
657                 return 0;
658
659         switch (locknum) {
660         case TG3_APE_LOCK_GPIO:
661                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
662                         return 0;
663         case TG3_APE_LOCK_GRC:
664         case TG3_APE_LOCK_MEM:
665                 if (!tp->pci_fn)
666                         bit = APE_LOCK_REQ_DRIVER;
667                 else
668                         bit = 1 << tp->pci_fn;
669                 break;
670         default:
671                 return -EINVAL;
672         }
673
674         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
675                 req = TG3_APE_LOCK_REQ;
676                 gnt = TG3_APE_LOCK_GRANT;
677         } else {
678                 req = TG3_APE_PER_LOCK_REQ;
679                 gnt = TG3_APE_PER_LOCK_GRANT;
680         }
681
682         off = 4 * locknum;
683
684         tg3_ape_write32(tp, req + off, bit);
685
686         /* Wait for up to 1 millisecond to acquire lock. */
687         for (i = 0; i < 100; i++) {
688                 status = tg3_ape_read32(tp, gnt + off);
689                 if (status == bit)
690                         break;
691                 udelay(10);
692         }
693
694         if (status != bit) {
695                 /* Revoke the lock request. */
696                 tg3_ape_write32(tp, gnt + off, bit);
697                 ret = -EBUSY;
698         }
699
700         return ret;
701 }
702
703 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
704 {
705         u32 gnt, bit;
706
707         if (!tg3_flag(tp, ENABLE_APE))
708                 return;
709
710         switch (locknum) {
711         case TG3_APE_LOCK_GPIO:
712                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
713                         return;
714         case TG3_APE_LOCK_GRC:
715         case TG3_APE_LOCK_MEM:
716                 if (!tp->pci_fn)
717                         bit = APE_LOCK_GRANT_DRIVER;
718                 else
719                         bit = 1 << tp->pci_fn;
720                 break;
721         default:
722                 return;
723         }
724
725         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
726                 gnt = TG3_APE_LOCK_GRANT;
727         else
728                 gnt = TG3_APE_PER_LOCK_GRANT;
729
730         tg3_ape_write32(tp, gnt + 4 * locknum, bit);
731 }
732
/* Post @event to the APE firmware and ring its doorbell.  Silently
 * returns if the firmware is NCSI-based (no APE events), if the APE
 * segment signature is absent, or if the firmware is not ready.  Waits
 * up to ~1ms (10 x 100us) for the previous event to be consumed.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		/* The event status register is shared; guard it with the
		 * APE memory lock.
		 */
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Previous event consumed: post ours with PENDING set. */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if we actually posted the event. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
772
/* Inform the APE firmware of a driver lifecycle transition (init,
 * shutdown, suspend) by updating the host segment in APE shared
 * memory, then posting the matching state-change event.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish a valid host segment (signature, length,
		 * driver id, behavior flags) and bump the init count.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		/* Advertise the WoL state only when wake-up is enabled
		 * for this device.
		 */
		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
829
/* Mask all chip interrupts: set the PCI interrupt mask bit in
 * MISC_HOST_CTRL and write 0x1 to every vector's interrupt mailbox.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
839
/* Unmask chip interrupts and re-arm every vector's mailbox with its
 * last processed tag.  Rebuilds tp->coal_now and forces either an
 * initial interrupt or a coalescing-now cycle so already-pending
 * status is not lost.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();		/* order irq_sync clear before the MMIO writes below */

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* In 1-shot MSI mode the mailbox is written a second time. */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
870
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
872 {
873         struct tg3 *tp = tnapi->tp;
874         struct tg3_hw_status *sblk = tnapi->hw_status;
875         unsigned int work_exists = 0;
876
877         /* check for phy events */
878         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879                 if (sblk->status & SD_STATUS_LINK_CHG)
880                         work_exists = 1;
881         }
882         /* check for RX/TX work to do */
883         if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
884             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
885                 work_exists = 1;
886
887         return work_exists;
888 }
889
/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Re-arm this vector's mailbox with the last processed tag. */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
910
/* Reprogram TG3PCI_CLOCK_CTRL, preserving only the CLKRUN bits and the
 * low 5 bits, stepping through the intermediate ALTCLK / core-speed
 * settings the hardware expects before the final write.  Skipped on
 * CPMU-equipped and 5780-class parts.
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Older parts drop the 44MHz core bit in two steps,
		 * keeping ALTCLK asserted in between.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
943
944 #define PHY_BUSY_LOOPS  5000
945
/* Read PHY register @reg over the MDIO (MI) interface into *@val.
 * MAC auto-polling is paused around the manual transaction and
 * restored afterwards.  Returns 0 on success, -EBUSY if the MI
 * interface stays busy for PHY_BUSY_LOOPS polls.
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* Auto-polling would race with a manual MI transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the MI frame: PHY address, register, read command. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion: the BUSY bit clears when done, then the
	 * register is re-read to fetch the returned data.
	 */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	/* loops == 0 means the BUSY bit never cleared. */
	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
994
/* Write @val to PHY register @reg over the MDIO (MI) interface.
 * For FET-style PHYs, writes to MII_CTRL1000 / MII_TG3_AUX_CTRL are
 * skipped and reported as success.  MAC auto-polling is paused around
 * the manual transaction.  Returns 0 on success, -EBUSY on MI timeout.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	/* FET PHYs: skip these registers, report success. */
	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	/* Auto-polling would race with a manual MI transaction. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the MI frame: PHY address, register, data, write cmd. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion (BUSY bit clears). */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	/* loops == 0 means the BUSY bit never cleared. */
	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was enabled on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
1043
1044 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1045 {
1046         int err;
1047
1048         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1049         if (err)
1050                 goto done;
1051
1052         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1053         if (err)
1054                 goto done;
1055
1056         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1057                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1058         if (err)
1059                 goto done;
1060
1061         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1062
1063 done:
1064         return err;
1065 }
1066
1067 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1068 {
1069         int err;
1070
1071         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1072         if (err)
1073                 goto done;
1074
1075         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1076         if (err)
1077                 goto done;
1078
1079         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1080                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1081         if (err)
1082                 goto done;
1083
1084         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1085
1086 done:
1087         return err;
1088 }
1089
1090 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1091 {
1092         int err;
1093
1094         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1095         if (!err)
1096                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1097
1098         return err;
1099 }
1100
1101 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1102 {
1103         int err;
1104
1105         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1106         if (!err)
1107                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1108
1109         return err;
1110 }
1111
1112 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1113 {
1114         int err;
1115
1116         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1117                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1118                            MII_TG3_AUXCTL_SHDWSEL_MISC);
1119         if (!err)
1120                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1121
1122         return err;
1123 }
1124
1125 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1126 {
1127         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1128                 set |= MII_TG3_AUXCTL_MISC_WREN;
1129
1130         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1131 }
1132
/* Enable/disable shadow-DSP access through the PHY auxiliary control
 * register.  Both expand to an expression returning the
 * tg3_phy_auxctl_write() status.  Neither macro carries a trailing
 * semicolon: callers supply their own, and a semicolon baked into the
 * macro would break use in if/else bodies.
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1141
/* Reset the PHY via BMCR_RESET and poll (up to 5000 iterations at
 * ~10us each) for the self-clearing reset bit to drop.  Returns 0 on
 * success, -EBUSY if MDIO access fails or the bit never clears.
 */
static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	/* limit ends at -1 only when the loop exhausted without break. */
	if (limit < 0)
		return -EBUSY;

	return 0;
}
1172
1173 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1174 {
1175         struct tg3 *tp = bp->priv;
1176         u32 val;
1177
1178         spin_lock_bh(&tp->lock);
1179
1180         if (tg3_readphy(tp, reg, &val))
1181                 val = -EIO;
1182
1183         spin_unlock_bh(&tp->lock);
1184
1185         return val;
1186 }
1187
1188 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1189 {
1190         struct tg3 *tp = bp->priv;
1191         u32 ret = 0;
1192
1193         spin_lock_bh(&tp->lock);
1194
1195         if (tg3_writephy(tp, reg, val))
1196                 ret = -EIO;
1197
1198         spin_unlock_bh(&tp->lock);
1199
1200         return ret;
1201 }
1202
/* phylib mdio-bus reset callback.  No bus-level reset is required for
 * tg3 hardware, so report success without touching the device.
 */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1207
/* Program the 5785 MAC's PHY configuration registers (LED modes,
 * clock timeouts, RGMII in-band status signalling) to match the
 * external PHY model and interface mode that phylib detected.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	/* Select LED modes by PHY model; unknown PHYs are left alone. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		/* Non-RGMII: set LED modes plus clock timeouts only. */
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII with in-band status enabled needs the full mask set. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	/* Rebuild PHYCFG1: clear the RGMII/timeout bits, then set the
	 * external in-band decode/status bits per the driver flags.
	 */
	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Same pattern for the external RGMII mode register. */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1288
/* Disable MAC auto-polling of the MDIO bus, and re-apply the 5785's
 * PHY-specific MAC configuration when the mdio bus is already up.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1299
/* Determine the PHY address, and (when USE_PHYLIB is set) allocate and
 * register an mdio bus with phylib, then apply per-PHY-model interface
 * and workaround flags.  Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		/* 5717+ maps PHYs per PCI function; serdes PHYs sit 7
		 * addresses above the corresponding copper PHY.
		 */
		tp->phy_addr = tp->pci_fn + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	/* Nothing more to do without phylib, or if already registered. */
	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	/* Bus id encodes PCI bus number and devfn for uniqueness. */
	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Apply per-PHY-model interface mode and quirk flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1404
1405 static void tg3_mdio_fini(struct tg3 *tp)
1406 {
1407         if (tg3_flag(tp, MDIOBUS_INITED)) {
1408                 tg3_flag_clear(tp, MDIOBUS_INITED);
1409                 mdiobus_unregister(tp->mdio_bus);
1410                 mdiobus_free(tp->mdio_bus);
1411         }
1412 }
1413
1414 /* tp->lock is held. */
1415 static inline void tg3_generate_fw_event(struct tg3 *tp)
1416 {
1417         u32 val;
1418
1419         val = tr32(GRC_RX_CPU_EVENT);
1420         val |= GRC_RX_CPU_DRIVER_EVENT;
1421         tw32_f(GRC_RX_CPU_EVENT, val);
1422
1423         tp->last_event_jiffies = jiffies;
1424 }
1425
1426 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1427
/* tp->lock is held.
 *
 * Wait until firmware has acknowledged the previous driver event
 * (GRC_RX_CPU_DRIVER_EVENT cleared), bounded by
 * TG3_FW_EVENT_TIMEOUT_USEC.  Returns immediately if that much time
 * has already elapsed since the last event was generated.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;	/* poll in ~8us slices */

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1454
/* tp->lock is held.
 *
 * Report link state to management (UMP) firmware: copy the MII link
 * registers (BMCR/BMSR, local/partner advertisement, 1000BASE-T
 * control/status, PHY address) into the NIC SRAM command mailbox and
 * raise a firmware event.  Only applies to 5780-class chips with ASF.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	/* Word 0: BMCR in the high half, BMSR in the low half. */
	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	/* Word 1: local and link-partner advertisement. */
	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	/* Word 2: gigabit control/status; zero for MII serdes. */
	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	/* Word 3: PHY address register, high half only. */
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1501
/* tp->lock is held.
 *
 * Ask ASF firmware to pause via the SRAM command mailbox, waiting for
 * the RX CPU to acknowledge before and after.  Skipped when the APE
 * handles management traffic instead of the RX CPU.
 */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}
1517
/* tp->lock is held.
 *
 * Called before a chip reset: write the firmware mailbox magic so the
 * bootcode sees a driver present, record the pending driver state for
 * new-handshake ASF firmware, and notify the APE for init/suspend
 * transitions (shutdown is signalled post-reset instead).
 */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_INIT ||
	    kind == RESET_KIND_SUSPEND)
		tg3_ape_driver_state_change(tp, kind);
}
1550
/* tp->lock is held.
 *
 * Called after a chip reset completes: record the *_DONE driver state
 * for new-handshake ASF firmware, and notify the APE on shutdown.
 */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}

	if (kind == RESET_KIND_SHUTDOWN)
		tg3_ape_driver_state_change(tp, kind);
}
1574
/* tp->lock is held.
 *
 * Legacy (pre-new-handshake) ASF signalling: record the driver state
 * in the SRAM mailbox for firmware that uses the old protocol.
 */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}
1600
/* Wait for on-chip firmware to finish initializing after a reset.
 * 5906 (VCPU) parts poll VCPU_STATUS; all others poll the SRAM
 * firmware mailbox for the inverted magic value.  Absent firmware is
 * not treated as an error (some Sun onboard parts ship without it),
 * but is reported once.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
1644
1645 static void tg3_link_report(struct tg3 *tp)
1646 {
1647         if (!netif_carrier_ok(tp->dev)) {
1648                 netif_info(tp, link, tp->dev, "Link is down\n");
1649                 tg3_ump_link_report(tp);
1650         } else if (netif_msg_link(tp)) {
1651                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1652                             (tp->link_config.active_speed == SPEED_1000 ?
1653                              1000 :
1654                              (tp->link_config.active_speed == SPEED_100 ?
1655                               100 : 10)),
1656                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1657                              "full" : "half"));
1658
1659                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1660                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1661                             "on" : "off",
1662                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1663                             "on" : "off");
1664
1665                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1666                         netdev_info(tp->dev, "EEE is %s\n",
1667                                     tp->setlpicnt ? "enabled" : "disabled");
1668
1669                 tg3_ump_link_report(tp);
1670         }
1671 }
1672
1673 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1674 {
1675         u16 miireg;
1676
1677         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1678                 miireg = ADVERTISE_1000XPAUSE;
1679         else if (flow_ctrl & FLOW_CTRL_TX)
1680                 miireg = ADVERTISE_1000XPSE_ASYM;
1681         else if (flow_ctrl & FLOW_CTRL_RX)
1682                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1683         else
1684                 miireg = 0;
1685
1686         return miireg;
1687 }
1688
1689 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1690 {
1691         u8 cap = 0;
1692
1693         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1694                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1695         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1696                 if (lcladv & ADVERTISE_1000XPAUSE)
1697                         cap = FLOW_CTRL_RX;
1698                 if (rmtadv & ADVERTISE_1000XPAUSE)
1699                         cap = FLOW_CTRL_TX;
1700         }
1701
1702         return cap;
1703 }
1704
1705 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1706 {
1707         u8 autoneg;
1708         u8 flowctrl = 0;
1709         u32 old_rx_mode = tp->rx_mode;
1710         u32 old_tx_mode = tp->tx_mode;
1711
1712         if (tg3_flag(tp, USE_PHYLIB))
1713                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1714         else
1715                 autoneg = tp->link_config.autoneg;
1716
1717         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1718                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1719                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1720                 else
1721                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1722         } else
1723                 flowctrl = tp->link_config.flowctrl;
1724
1725         tp->link_config.active_flowctrl = flowctrl;
1726
1727         if (flowctrl & FLOW_CTRL_RX)
1728                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1729         else
1730                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1731
1732         if (old_rx_mode != tp->rx_mode)
1733                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1734
1735         if (flowctrl & FLOW_CTRL_TX)
1736                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1737         else
1738                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1739
1740         if (old_tx_mode != tp->tx_mode)
1741                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1742 }
1743
/* phylib link-change callback: mirror the PHY's negotiated state
 * (port mode, duplex, flow control, speed-dependent MAC tunables)
 * into the MAC registers, then report link transitions.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	/* Start from the current MAC mode with the port-mode and
	 * half-duplex fields cleared; both are recomputed below.
	 */
	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			/* Full duplex: build the local advertisement from
			 * our configured flow control and the remote one
			 * from the partner's pause bits, then resolve.
			 */
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	/* Only touch the MAC mode register when something changed. */
	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000 Mbps half duplex needs a larger slot time. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only on an actual link-state or parameter change. */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	/* Log outside the lock; tg3_link_report() may sleep in printk
	 * paths and needs no protection from this context.
	 */
	if (linkmesg)
		tg3_link_report(tp);
}
1827
/* Connect the MAC to its PHY through phylib.
 *
 * Resets the PHY to a known state, attaches tg3_adjust_link() as the
 * link-change handler, and masks the PHY's supported feature set down
 * to what the MAC can actually do for the detected interface mode.
 * Returns 0 on success (or if already connected), a negative errno on
 * failure.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* 10/100-only hardware: fall through to the MII mask. */
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unsupported interface mode: undo the attach. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
1875
1876 static void tg3_phy_start(struct tg3 *tp)
1877 {
1878         struct phy_device *phydev;
1879
1880         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1881                 return;
1882
1883         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1884
1885         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1886                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1887                 phydev->speed = tp->link_config.orig_speed;
1888                 phydev->duplex = tp->link_config.orig_duplex;
1889                 phydev->autoneg = tp->link_config.orig_autoneg;
1890                 phydev->advertising = tp->link_config.orig_advertising;
1891         }
1892
1893         phy_start(phydev);
1894
1895         phy_start_aneg(phydev);
1896 }
1897
1898 static void tg3_phy_stop(struct tg3 *tp)
1899 {
1900         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1901                 return;
1902
1903         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1904 }
1905
1906 static void tg3_phy_fini(struct tg3 *tp)
1907 {
1908         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1909                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1910                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1911         }
1912 }
1913
1914 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1915 {
1916         int err;
1917         u32 val;
1918
1919         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1920                 return 0;
1921
1922         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1923                 /* Cannot do read-modify-write on 5401 */
1924                 err = tg3_phy_auxctl_write(tp,
1925                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1926                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1927                                            0x4c20);
1928                 goto done;
1929         }
1930
1931         err = tg3_phy_auxctl_read(tp,
1932                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1933         if (err)
1934                 return err;
1935
1936         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1937         err = tg3_phy_auxctl_write(tp,
1938                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1939
1940 done:
1941         return err;
1942 }
1943
1944 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1945 {
1946         u32 phytest;
1947
1948         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1949                 u32 phy;
1950
1951                 tg3_writephy(tp, MII_TG3_FET_TEST,
1952                              phytest | MII_TG3_FET_SHADOW_EN);
1953                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1954                         if (enable)
1955                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1956                         else
1957                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1958                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1959                 }
1960                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1961         }
1962 }
1963
1964 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1965 {
1966         u32 reg;
1967
1968         if (!tg3_flag(tp, 5705_PLUS) ||
1969             (tg3_flag(tp, 5717_PLUS) &&
1970              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1971                 return;
1972
1973         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1974                 tg3_phy_fet_toggle_apd(tp, enable);
1975                 return;
1976         }
1977
1978         reg = MII_TG3_MISC_SHDW_WREN |
1979               MII_TG3_MISC_SHDW_SCR5_SEL |
1980               MII_TG3_MISC_SHDW_SCR5_LPED |
1981               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1982               MII_TG3_MISC_SHDW_SCR5_SDTL |
1983               MII_TG3_MISC_SHDW_SCR5_C125OE;
1984         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1985                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1986
1987         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1988
1989
1990         reg = MII_TG3_MISC_SHDW_WREN |
1991               MII_TG3_MISC_SHDW_APD_SEL |
1992               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1993         if (enable)
1994                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1995
1996         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1997 }
1998
1999 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2000 {
2001         u32 phy;
2002
2003         if (!tg3_flag(tp, 5705_PLUS) ||
2004             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2005                 return;
2006
2007         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2008                 u32 ephy;
2009
2010                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2011                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2012
2013                         tg3_writephy(tp, MII_TG3_FET_TEST,
2014                                      ephy | MII_TG3_FET_SHADOW_EN);
2015                         if (!tg3_readphy(tp, reg, &phy)) {
2016                                 if (enable)
2017                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2018                                 else
2019                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2020                                 tg3_writephy(tp, reg, phy);
2021                         }
2022                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2023                 }
2024         } else {
2025                 int ret;
2026
2027                 ret = tg3_phy_auxctl_read(tp,
2028                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2029                 if (!ret) {
2030                         if (enable)
2031                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2032                         else
2033                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2034                         tg3_phy_auxctl_write(tp,
2035                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2036                 }
2037         }
2038 }
2039
2040 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2041 {
2042         int ret;
2043         u32 val;
2044
2045         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2046                 return;
2047
2048         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2049         if (!ret)
2050                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2051                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2052 }
2053
2054 static void tg3_phy_apply_otp(struct tg3 *tp)
2055 {
2056         u32 otp, phy;
2057
2058         if (!tp->phy_otp)
2059                 return;
2060
2061         otp = tp->phy_otp;
2062
2063         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2064                 return;
2065
2066         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2067         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2068         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2069
2070         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2071               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2072         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2073
2074         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2075         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2076         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2077
2078         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2079         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2080
2081         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2082         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2083
2084         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2085               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2086         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2087
2088         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2089 }
2090
/* Re-evaluate Energy Efficient Ethernet after a link change.
 *
 * On an autonegotiated 100/1000 full-duplex link, program the CPMU
 * LPI exit timer for the current speed and, if the link partner
 * resolved EEE support, arm tp->setlpicnt so LPI gets enabled later.
 * In all other cases ensure LPI is switched off.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* LPI exit latency depends on the link speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Read the clause-45 EEE resolution status from the PHY. */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	if (!tp->setlpicnt) {
		/* EEE is not usable on this link: clear the DSP TAP26
		 * setting (when a link is up and the SMDSP is reachable)
		 * and disable LPI in the CPMU.
		 */
		if (current_link_up == 1 &&
		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}

		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
2133
2134 static void tg3_phy_eee_enable(struct tg3 *tp)
2135 {
2136         u32 val;
2137
2138         if (tp->link_config.active_speed == SPEED_1000 &&
2139             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2140              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2141              tg3_flag(tp, 57765_CLASS)) &&
2142             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2143                 val = MII_TG3_DSP_TAP26_ALNOKO |
2144                       MII_TG3_DSP_TAP26_RMRXSTO;
2145                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2146                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2147         }
2148
2149         val = tr32(TG3_CPMU_EEE_MODE);
2150         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2151 }
2152
2153 static int tg3_wait_macro_done(struct tg3 *tp)
2154 {
2155         int limit = 100;
2156
2157         while (limit--) {
2158                 u32 tmp32;
2159
2160                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2161                         if ((tmp32 & 0x1000) == 0)
2162                                 break;
2163                 }
2164         }
2165         if (limit < 0)
2166                 return -EBUSY;
2167
2168         return 0;
2169 }
2170
/* Write a known test pattern into each of the four PHY DSP channels
 * and read it back to verify the PHY's internal memory is sane.
 *
 * On any macro timeout *resetp is set so the caller retries after
 * another PHY reset; on timeout or readback mismatch -EBUSY is
 * returned.  Returns 0 when all four channels verify clean.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's block and start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		/* Execute the write macro and wait for completion. */
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Re-select the channel and start a read macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read back the pattern as low/high word pairs. */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: put the DSP into a safe state
				 * before reporting failure.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
2236
2237 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2238 {
2239         int chan;
2240
2241         for (chan = 0; chan < 4; chan++) {
2242                 int i;
2243
2244                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2245                              (chan * 0x2000) | 0x0200);
2246                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2247                 for (i = 0; i < 6; i++)
2248                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2249                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2250                 if (tg3_wait_macro_done(tp))
2251                         return -EBUSY;
2252         }
2253
2254         return 0;
2255 }
2256
2257 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2258 {
2259         u32 reg32, phy9_orig;
2260         int retries, do_phy_reset, err;
2261
2262         retries = 10;
2263         do_phy_reset = 1;
2264         do {
2265                 if (do_phy_reset) {
2266                         err = tg3_bmcr_reset(tp);
2267                         if (err)
2268                                 return err;
2269                         do_phy_reset = 0;
2270                 }
2271
2272                 /* Disable transmitter and interrupt.  */
2273                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2274                         continue;
2275
2276                 reg32 |= 0x3000;
2277                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2278
2279                 /* Set full-duplex, 1000 mbps.  */
2280                 tg3_writephy(tp, MII_BMCR,
2281                              BMCR_FULLDPLX | BMCR_SPEED1000);
2282
2283                 /* Set to master mode.  */
2284                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2285                         continue;
2286
2287                 tg3_writephy(tp, MII_CTRL1000,
2288                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2289
2290                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2291                 if (err)
2292                         return err;
2293
2294                 /* Block the PHY control access.  */
2295                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2296
2297                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2298                 if (!err)
2299                         break;
2300         } while (--retries);
2301
2302         err = tg3_phy_reset_chanpat(tp);
2303         if (err)
2304                 return err;
2305
2306         tg3_phydsp_write(tp, 0x8005, 0x0000);
2307
2308         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2309         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2310
2311         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2312
2313         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2314
2315         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2316                 reg32 &= ~0x3000;
2317                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2318         } else if (!err)
2319                 err = -EBUSY;
2320
2321         return err;
2322 }
2323
/* Fully reset the tigon3 PHY and re-apply all chip-specific
 * workarounds and tuning (OTP values, APD, jumbo-frame settings,
 * auto-MDIX, wirespeed).  Reports link loss if the carrier was up.
 * Returns 0 on success or a negative errno.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Take the embedded PHY out of IDDQ power-down first. */
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* BMSR is latched; read twice to get the current state and
	 * to confirm the PHY responds at all.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the dedicated DSP-verifying reset path. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* Temporarily lift the 10Mb-RX-only restriction on 5784 (non-AX)
	 * around the BMCR reset; restored below.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX/5761-AX: back off the 1000Mb MAC clock if it came up
	 * at the slow 12.5MHz setting.
	 */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* The remaining workarounds do not apply to 5717+ MII serdes. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* Per-PHY erratum workarounds, gated on flags set at probe. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2464
/* Inter-function GPIO mailbox messages.  Each of the (up to four) PCI
 * functions owns a 4-bit field in the shared status word: a
 * driver-present bit and a needs-Vaux bit.  The *_ALL_* masks cover
 * the corresponding bit across all four function slots.
 */
#define TG3_GPIO_MSG_DRVR_PRES           0x00000001
#define TG3_GPIO_MSG_NEED_VAUX           0x00000002
#define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
                                          TG3_GPIO_MSG_NEED_VAUX)
#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
        ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
         (TG3_GPIO_MSG_DRVR_PRES << 4) | \
         (TG3_GPIO_MSG_DRVR_PRES << 8) | \
         (TG3_GPIO_MSG_DRVR_PRES << 12))

#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
        ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
         (TG3_GPIO_MSG_NEED_VAUX << 4) | \
         (TG3_GPIO_MSG_NEED_VAUX << 8) | \
         (TG3_GPIO_MSG_NEED_VAUX << 12))
2480
/* Replace this function's nibble in the shared GPIO-message status
 * word with @newstat.  5717/5719 keep the word in the APE GPIO message
 * register; other chips use TG3_CPMU_DRV_STATUS.  Returns the full
 * updated status shifted down by TG3_APE_GPIO_MSG_SHIFT so callers can
 * test the per-function message masks directly.
 */
static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
{
        u32 status, shift;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
                status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
        else
                status = tr32(TG3_CPMU_DRV_STATUS);

        /* Clear and rewrite only this function's 4-bit nibble. */
        shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
        status &= ~(TG3_GPIO_MSG_MASK << shift);
        status |= (newstat << shift);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
                tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
        else
                tw32(TG3_CPMU_DRV_STATUS, status);

        return status >> TG3_APE_GPIO_MSG_SHIFT;
}
2503
/* Switch the NIC power source to Vmain.  On 5717/5719/5720 the power
 * GPIOs are shared among functions, so the APE GPIO lock is taken and
 * driver presence is advertised before touching GRC_LOCAL_CTRL.
 * Returns 0 on success (or when not a NIC), -EIO if the GPIO lock
 * cannot be acquired.
 */
static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
{
        if (!tg3_flag(tp, IS_NIC))
                return 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
                        return -EIO;

                tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);

                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
        } else {
                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        }

        return 0;
}
2528
/* Remain on Vmain by pulsing GPIO1 (high, low, high) with the power
 * switch settle delay after each write.  No-op for non-NIC
 * configurations and for 5700/5701, which are excluded here.
 */
static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
{
        u32 grc_local_ctrl;

        if (!tg3_flag(tp, IS_NIC) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
                return;

        /* Enable GPIO1 as an output for the pulse sequence. */
        grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;

        tw32_wait_f(GRC_LOCAL_CTRL,
                    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);

        tw32_wait_f(GRC_LOCAL_CTRL,
                    grc_local_ctrl,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);

        tw32_wait_f(GRC_LOCAL_CTRL,
                    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
                    TG3_GRC_LCLCTL_PWRSW_DELAY);
}
2552
/* Switch the NIC power source to Vaux by sequencing the GRC
 * local-control GPIOs.  The sequence is chip-specific: 5700/5701 take
 * a single combined write, 5761 non-e parts have GPIO 0 and GPIO 2
 * swapped, and all other NICs use the generic three-step sequence with
 * optional GPIO2 avoidance (5753 family).
 */
static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
{
        if (!tg3_flag(tp, IS_NIC))
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                            (GRC_LCLCTRL_GPIO_OE0 |
                             GRC_LCLCTRL_GPIO_OE1 |
                             GRC_LCLCTRL_GPIO_OE2 |
                             GRC_LCLCTRL_GPIO_OUTPUT0 |
                             GRC_LCLCTRL_GPIO_OUTPUT1),
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
                /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
                u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
                                     GRC_LCLCTRL_GPIO_OE1 |
                                     GRC_LCLCTRL_GPIO_OE2 |
                                     GRC_LCLCTRL_GPIO_OUTPUT0 |
                                     GRC_LCLCTRL_GPIO_OUTPUT1 |
                                     tp->grc_local_ctrl;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
                tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);
        } else {
                u32 no_gpio2;
                u32 grc_local_ctrl = 0;

                /* Workaround to prevent overdrawing Amps. */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                        grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
                        tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
                                    grc_local_ctrl,
                                    TG3_GRC_LCLCTL_PWRSW_DELAY);
                }

                /* On 5753 and variants, GPIO2 cannot be used. */
                no_gpio2 = tp->nic_sram_data_cfg &
                           NIC_SRAM_DATA_CFG_NO_GPIO2;

                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
                                  GRC_LCLCTRL_GPIO_OE1 |
                                  GRC_LCLCTRL_GPIO_OE2 |
                                  GRC_LCLCTRL_GPIO_OUTPUT1 |
                                  GRC_LCLCTRL_GPIO_OUTPUT2;
                if (no_gpio2) {
                        grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
                                            GRC_LCLCTRL_GPIO_OUTPUT2);
                }
                tw32_wait_f(GRC_LOCAL_CTRL,
                            tp->grc_local_ctrl | grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                /* Raise GPIO0 after the first settle delay. */
                grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

                tw32_wait_f(GRC_LOCAL_CTRL,
                            tp->grc_local_ctrl | grc_local_ctrl,
                            TG3_GRC_LCLCTL_PWRSW_DELAY);

                if (!no_gpio2) {
                        grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
                        tw32_wait_f(GRC_LOCAL_CTRL,
                                    tp->grc_local_ctrl | grc_local_ctrl,
                                    TG3_GRC_LCLCTL_PWRSW_DELAY);
                }
        }
}
2629
/* Arbitrate the shared power source on 5717-class devices.  Each
 * function posts whether it needs Vaux via the shared status word;
 * the power source is only changed when no other function's driver is
 * present.  All transitions are serialized via the APE GPIO lock.
 */
static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
{
        u32 msg = 0;

        /* Serialize power state transitions */
        if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
                return;

        if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
                msg = TG3_GPIO_MSG_NEED_VAUX;

        msg = tg3_set_function_status(tp, msg);

        /* Another function's driver is still present - defer to it. */
        if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
                goto done;

        if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
                tg3_pwrsrc_switch_to_vaux(tp);
        else
                tg3_pwrsrc_die_with_vmain(tp);

done:
        tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
}
2654
/* Choose between Vaux and Vmain based on this device's (and, for
 * dual-function boards, the peer function's) WoL and ASF needs.
 * @include_wol: when true, WOL_ENABLE counts as a reason to stay on
 * Vaux.
 */
static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
{
        bool need_vaux = false;

        /* The GPIOs do something completely different on 57765. */
        if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
                return;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                tg3_frob_aux_power_5717(tp, include_wol ?
                                        tg3_flag(tp, WOL_ENABLE) != 0 : 0);
                return;
        }

        if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
                struct net_device *dev_peer;

                dev_peer = pci_get_drvdata(tp->pdev_peer);

                /* remove_one() may have been run on the peer. */
                if (dev_peer) {
                        struct tg3 *tp_peer = netdev_priv(dev_peer);

                        /* Peer driver is still up; leave the GPIOs to it. */
                        if (tg3_flag(tp_peer, INIT_COMPLETE))
                                return;

                        if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
                            tg3_flag(tp_peer, ENABLE_ASF))
                                need_vaux = true;
                }
        }

        if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
            tg3_flag(tp, ENABLE_ASF))
                need_vaux = true;

        if (need_vaux)
                tg3_pwrsrc_switch_to_vaux(tp);
        else
                tg3_pwrsrc_die_with_vmain(tp);
}
2698
2699 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2700 {
2701         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2702                 return 1;
2703         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2704                 if (speed != SPEED_10)
2705                         return 1;
2706         } else if (speed == SPEED_10)
2707                 return 1;
2708
2709         return 0;
2710 }
2711
/* Power down the PHY ahead of a low-power transition.
 * @do_low_power: when set, also force LEDs off and program aux-control
 * low-power settings on non-FET copper PHYs.  SerDes, 5906 and FET
 * PHYs each take dedicated paths; several chips must not have the PHY
 * powered down at all because of hardware bugs and return before the
 * final BMCR_PDOWN write.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
        u32 val;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
                        u32 serdes_cfg = tr32(MAC_SERDES_CFG);

                        sg_dig_ctrl |=
                                SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
                        tw32(SG_DIG_CTRL, sg_dig_ctrl);
                        tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
                }
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                /* 5906: reset the PHY and put the internal EPHY in IDDQ. */
                tg3_bmcr_reset(tp);
                val = tr32(GRC_MISC_CFG);
                tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
                udelay(40);
                return;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                u32 phytest;
                if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
                        u32 phy;

                        tg3_writephy(tp, MII_ADVERTISE, 0);
                        tg3_writephy(tp, MII_BMCR,
                                     BMCR_ANENABLE | BMCR_ANRESTART);

                        /* Open shadow-register access to set the
                         * standby power-down bit, then restore.
                         */
                        tg3_writephy(tp, MII_TG3_FET_TEST,
                                     phytest | MII_TG3_FET_SHADOW_EN);
                        if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
                                phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
                                tg3_writephy(tp,
                                             MII_TG3_FET_SHDW_AUXMODE4,
                                             phy);
                        }
                        tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
                }
                return;
        } else if (do_low_power) {
                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_FORCE_LED_OFF);

                val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
                      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
                      MII_TG3_AUXCTL_PCTL_VREG_11V;
                tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
        }

        /* The PHY should not be powered down on some chips because
         * of bugs.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
             (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
                return;

        if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
            GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
                /* Slow the 1000Mb MAC clock to 12.5MHz before powering
                 * down the PHY on these revisions.
                 */
                val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
                val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
                val |= CPMU_LSPD_1000MB_MACCLK_12_5;
                tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
        }

        tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2784
/* tp->lock is held. */
/* Acquire the NVRAM software arbitration semaphore.  The lock is
 * recursive via nvram_lock_cnt - only the first acquisition touches
 * hardware.  Returns 0 on success, -ENODEV if the grant never arrives.
 */
static int tg3_nvram_lock(struct tg3 *tp)
{
        if (tg3_flag(tp, NVRAM)) {
                int i;

                if (tp->nvram_lock_cnt == 0) {
                        tw32(NVRAM_SWARB, SWARB_REQ_SET1);
                        /* Poll up to 8000 * 20us for the grant bit. */
                        for (i = 0; i < 8000; i++) {
                                if (tr32(NVRAM_SWARB) & SWARB_GNT1)
                                        break;
                                udelay(20);
                        }
                        if (i == 8000) {
                                /* Timed out - withdraw the request. */
                                tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
                                return -ENODEV;
                        }
                }
                tp->nvram_lock_cnt++;
        }
        return 0;
}
2807
/* tp->lock is held. */
/* Drop one reference on the NVRAM arbitration lock; the hardware
 * semaphore is released only when the count reaches zero.
 */
static void tg3_nvram_unlock(struct tg3 *tp)
{
        if (tg3_flag(tp, NVRAM)) {
                if (tp->nvram_lock_cnt > 0)
                        tp->nvram_lock_cnt--;
                if (tp->nvram_lock_cnt == 0)
                        tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
        }
}
2818
/* tp->lock is held. */
/* Enable host access to the NVRAM interface (5750+ parts without
 * protected NVRAM only).
 */
static void tg3_enable_nvram_access(struct tg3 *tp)
{
        if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
                u32 nvaccess = tr32(NVRAM_ACCESS);

                tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
        }
}
2828
/* tp->lock is held. */
/* Revoke host access to the NVRAM interface; inverse of
 * tg3_enable_nvram_access().
 */
static void tg3_disable_nvram_access(struct tg3 *tp)
{
        if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
                u32 nvaccess = tr32(NVRAM_ACCESS);

                tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
        }
}
2838
/* Read one 32-bit word from the legacy SEEPROM interface.
 * @offset must be dword aligned and within EEPROM_ADDR_ADDR_MASK.
 * Returns 0 with *val set (blind-byteswapped to native endian),
 * -EINVAL on a bad offset, or -EBUSY if the read never completes.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
                                        u32 offset, u32 *val)
{
        u32 tmp;
        int i;

        if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
                return -EINVAL;

        /* Preserve unrelated bits of the address register. */
        tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
                                        EEPROM_ADDR_DEVID_MASK |
                                        EEPROM_ADDR_READ);
        tw32(GRC_EEPROM_ADDR,
             tmp |
             (0 << EEPROM_ADDR_DEVID_SHIFT) |
             ((offset << EEPROM_ADDR_ADDR_SHIFT) &
              EEPROM_ADDR_ADDR_MASK) |
             EEPROM_ADDR_READ | EEPROM_ADDR_START);

        /* Poll (up to 1000 * 1ms) for the completion bit. */
        for (i = 0; i < 1000; i++) {
                tmp = tr32(GRC_EEPROM_ADDR);

                if (tmp & EEPROM_ADDR_COMPLETE)
                        break;
                msleep(1);
        }
        if (!(tmp & EEPROM_ADDR_COMPLETE))
                return -EBUSY;

        tmp = tr32(GRC_EEPROM_DATA);

        /*
         * The data will always be opposite the native endian
         * format.  Perform a blind byteswap to compensate.
         */
        *val = swab32(tmp);

        return 0;
}
2878
/* Maximum number of 10us polls to wait for an NVRAM command. */
#define NVRAM_CMD_TIMEOUT 10000

/* Issue @nvram_cmd to the NVRAM controller and poll for
 * NVRAM_CMD_DONE.  Returns 0 on completion, -EBUSY on timeout.
 */
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
        int i;

        tw32(NVRAM_CMD, nvram_cmd);
        for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
                udelay(10);
                if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
                        /* Extra settle delay after completion. */
                        udelay(10);
                        break;
                }
        }

        if (i == NVRAM_CMD_TIMEOUT)
                return -EBUSY;

        return 0;
}
2899
2900 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2901 {
2902         if (tg3_flag(tp, NVRAM) &&
2903             tg3_flag(tp, NVRAM_BUFFERED) &&
2904             tg3_flag(tp, FLASH) &&
2905             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2906             (tp->nvram_jedecnum == JEDEC_ATMEL))
2907
2908                 addr = ((addr / tp->nvram_pagesize) <<
2909                         ATMEL_AT45DB0X1B_PAGE_POS) +
2910                        (addr % tp->nvram_pagesize);
2911
2912         return addr;
2913 }
2914
2915 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2916 {
2917         if (tg3_flag(tp, NVRAM) &&
2918             tg3_flag(tp, NVRAM_BUFFERED) &&
2919             tg3_flag(tp, FLASH) &&
2920             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2921             (tp->nvram_jedecnum == JEDEC_ATMEL))
2922
2923                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2924                         tp->nvram_pagesize) +
2925                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2926
2927         return addr;
2928 }
2929
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
/* Read one 32-bit word from NVRAM at @offset.  Falls back to the
 * legacy SEEPROM path when no NVRAM interface is present.  Returns 0
 * with *val set, or a negative errno.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
        int ret;

        if (!tg3_flag(tp, NVRAM))
                return tg3_nvram_read_using_eeprom(tp, offset, val);

        /* Translate to the flash part's physical addressing scheme. */
        offset = tg3_nvram_phys_addr(tp, offset);

        if (offset > NVRAM_ADDR_MSK)
                return -EINVAL;

        ret = tg3_nvram_lock(tp);
        if (ret)
                return ret;

        tg3_enable_nvram_access(tp);

        tw32(NVRAM_ADDR, offset);
        ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
                NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

        if (ret == 0)
                *val = tr32(NVRAM_RDDATA);

        tg3_disable_nvram_access(tp);

        tg3_nvram_unlock(tp);

        return ret;
}
2967
2968 /* Ensures NVRAM data is in bytestream format. */
2969 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2970 {
2971         u32 v;
2972         int res = tg3_nvram_read(tp, offset, &v);
2973         if (!res)
2974                 *val = cpu_to_be32(v);
2975         return res;
2976 }
2977
/* Write @len bytes from @buf to the legacy SEEPROM one 32-bit word at
 * a time, polling each write to completion.  @offset and @len are
 * dword aligned.  Returns 0 on success or -EBUSY if a write never
 * completes.
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
                                    u32 offset, u32 len, u8 *buf)
{
        int i, j, rc = 0;
        u32 val;

        for (i = 0; i < len; i += 4) {
                u32 addr;
                __be32 data;

                addr = offset + i;

                memcpy(&data, buf + i, 4);

                /*
                 * The SEEPROM interface expects the data to always be opposite
                 * the native endian format.  We accomplish this by reversing
                 * all the operations that would have been performed on the
                 * data from a call to tg3_nvram_read_be32().
                 */
                tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

                /* Ack the previous cycle's completion bit (presumably
                 * write-one-to-clear - TODO confirm against hw docs).
                 */
                val = tr32(GRC_EEPROM_ADDR);
                tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

                val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
                        EEPROM_ADDR_READ);
                tw32(GRC_EEPROM_ADDR, val |
                        (0 << EEPROM_ADDR_DEVID_SHIFT) |
                        (addr & EEPROM_ADDR_ADDR_MASK) |
                        EEPROM_ADDR_START |
                        EEPROM_ADDR_WRITE);

                /* Poll (up to 1000 * 1ms) for this word's completion. */
                for (j = 0; j < 1000; j++) {
                        val = tr32(GRC_EEPROM_ADDR);

                        if (val & EEPROM_ADDR_COMPLETE)
                                break;
                        msleep(1);
                }
                if (!(val & EEPROM_ADDR_COMPLETE)) {
                        rc = -EBUSY;
                        break;
                }
        }

        return rc;
}
3026
3027 /* offset and length are dword aligned */
3028 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3029                 u8 *buf)
3030 {
3031         int ret = 0;
3032         u32 pagesize = tp->nvram_pagesize;
3033         u32 pagemask = pagesize - 1;
3034         u32 nvram_cmd;
3035         u8 *tmp;
3036
3037         tmp = kmalloc(pagesize, GFP_KERNEL);
3038         if (tmp == NULL)
3039                 return -ENOMEM;
3040
3041         while (len) {
3042                 int j;
3043                 u32 phy_addr, page_off, size;
3044
3045                 phy_addr = offset & ~pagemask;
3046
3047                 for (j = 0; j < pagesize; j += 4) {
3048                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3049                                                   (__be32 *) (tmp + j));
3050                         if (ret)
3051                                 break;
3052                 }
3053                 if (ret)
3054                         break;
3055
3056                 page_off = offset & pagemask;
3057                 size = pagesize;
3058                 if (len < size)
3059                         size = len;
3060
3061                 len -= size;
3062
3063                 memcpy(tmp + page_off, buf, size);
3064
3065                 offset = offset + (pagesize - page_off);
3066
3067                 tg3_enable_nvram_access(tp);
3068
3069                 /*
3070                  * Before we can erase the flash page, we need
3071                  * to issue a special "write enable" command.
3072                  */
3073                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3074
3075                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3076                         break;
3077
3078                 /* Erase the target page */
3079                 tw32(NVRAM_ADDR, phy_addr);
3080
3081                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3082                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3083
3084                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3085                         break;
3086
3087                 /* Issue another write enable to start the write. */
3088                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3089
3090                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3091                         break;
3092
3093                 for (j = 0; j < pagesize; j += 4) {
3094                         __be32 data;
3095
3096                         data = *((__be32 *) (tmp + j));
3097
3098                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3099
3100                         tw32(NVRAM_ADDR, phy_addr + j);
3101
3102                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3103                                 NVRAM_CMD_WR;
3104
3105                         if (j == 0)
3106                                 nvram_cmd |= NVRAM_CMD_FIRST;
3107                         else if (j == (pagesize - 4))
3108                                 nvram_cmd |= NVRAM_CMD_LAST;
3109
3110                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3111                         if (ret)
3112                                 break;
3113                 }
3114                 if (ret)
3115                         break;
3116         }
3117
3118         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3119         tg3_nvram_exec_cmd(tp, nvram_cmd);
3120
3121         kfree(tmp);
3122
3123         return ret;
3124 }
3125
/* offset and length are dword aligned */
/* Write @len bytes from @buf to a buffered flash / EEPROM part, one
 * 32-bit word per command.  FIRST/LAST flags are added at page and
 * transfer boundaries; older ST parts additionally need a
 * write-enable command before each page.  Returns 0 or a negative
 * errno.
 */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
                u8 *buf)
{
        int i, ret = 0;

        for (i = 0; i < len; i += 4, offset += 4) {
                u32 page_off, phy_addr, nvram_cmd;
                __be32 data;

                memcpy(&data, buf + i, 4);
                tw32(NVRAM_WRDATA, be32_to_cpu(data));

                page_off = offset % tp->nvram_pagesize;

                phy_addr = tg3_nvram_phys_addr(tp, offset);

                tw32(NVRAM_ADDR, phy_addr);

                nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

                /* First word of a page or of the whole transfer. */
                if (page_off == 0 || i == 0)
                        nvram_cmd |= NVRAM_CMD_FIRST;
                if (page_off == (tp->nvram_pagesize - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                /* Last word of the whole transfer. */
                if (i == (len - 4))
                        nvram_cmd |= NVRAM_CMD_LAST;

                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
                    !tg3_flag(tp, 5755_PLUS) &&
                    (tp->nvram_jedecnum == JEDEC_ST) &&
                    (nvram_cmd & NVRAM_CMD_FIRST)) {
                        u32 cmd;

                        /* ST parts need a write-enable per page. */
                        cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
                        ret = tg3_nvram_exec_cmd(tp, cmd);
                        if (ret)
                                break;
                }
                if (!tg3_flag(tp, FLASH)) {
                        /* We always do complete word writes to eeprom. */
                        nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
                }

                ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
                if (ret)
                        break;
        }
        return ret;
}
3177
/* offset and length are dword aligned */
/* Top-level NVRAM write entry point.  Temporarily de-asserts the
 * GPIO-based EEPROM write protection (when configured), enables NVRAM
 * write mode, and dispatches to the buffered or unbuffered
 * implementation.  Returns 0 or a negative errno.
 */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
        int ret;

        if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
                /* Drop GPIO1 to lift write protection. */
                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
                       ~GRC_LCLCTRL_GPIO_OUTPUT1);
                udelay(40);
        }

        if (!tg3_flag(tp, NVRAM)) {
                ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
        } else {
                u32 grc_mode;

                /* NOTE(review): this early return skips re-asserting
                 * the write protection below - verify intended.
                 */
                ret = tg3_nvram_lock(tp);
                if (ret)
                        return ret;

                tg3_enable_nvram_access(tp);
                if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
                        tw32(NVRAM_WRITE1, 0x406);

                grc_mode = tr32(GRC_MODE);
                tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

                if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
                        ret = tg3_nvram_write_block_buffered(tp, offset, len,
                                buf);
                } else {
                        ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
                                buf);
                }

                grc_mode = tr32(GRC_MODE);
                tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

                tg3_disable_nvram_access(tp);
                tg3_nvram_unlock(tp);
        }

        if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
                /* Restore the write-protect GPIO state. */
                tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
                udelay(40);
        }

        return ret;
}
3227
/* On-chip scratch memory windows for the RX and TX CPUs. */
#define RX_CPU_SCRATCH_BASE     0x30000
#define RX_CPU_SCRATCH_SIZE     0x04000
#define TX_CPU_SCRATCH_BASE     0x34000
#define TX_CPU_SCRATCH_SIZE     0x04000

/* tp->lock is held. */
/* Halt the on-chip CPU selected by @offset (RX_CPU_BASE or
 * TX_CPU_BASE).  5906 uses the VCPU halt bit instead of the CPU_MODE
 * register.  On success any NVRAM arbitration the firmware held is
 * also cleared.  Returns 0 on success, -ENODEV if the CPU never
 * reports halted.
 */
static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
{
        int i;

        /* 5705+ parts have no separate TX CPU (see
         * tg3_load_firmware_cpu()).
         */
        BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                u32 val = tr32(GRC_VCPU_EXT_CTRL);

                tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
                return 0;
        }
        if (offset == RX_CPU_BASE) {
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
                        tw32(offset + CPU_MODE,  CPU_MODE_HALT);
                        if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                                break;
                }

                /* RX CPU gets one final flushed halt write plus a
                 * settle delay.
                 */
                tw32(offset + CPU_STATE, 0xffffffff);
                tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
                udelay(10);
        } else {
                for (i = 0; i < 10000; i++) {
                        tw32(offset + CPU_STATE, 0xffffffff);
                        tw32(offset + CPU_MODE,  CPU_MODE_HALT);
                        if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
                                break;
                }
        }

        if (i >= 10000) {
                netdev_err(tp->dev, "%s timed out, %s CPU\n",
                           __func__, offset == RX_CPU_BASE ? "RX" : "TX");
                return -ENODEV;
        }

        /* Clear firmware's nvram arbitration. */
        if (tg3_flag(tp, NVRAM))
                tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
        return 0;
}
3277
/* Describes a firmware image destined for an on-chip CPU:
 * @fw_base: image start address (low 16 bits offset into the CPU's
 *           scratch window - see tg3_load_firmware_cpu()),
 * @fw_len:  image length in bytes,
 * @fw_data: the image as big-endian 32-bit words.
 */
struct fw_info {
        unsigned int fw_base;
        unsigned int fw_len;
        const __be32 *fw_data;
};
3283
/* tp->lock is held. */
/* Load the firmware image described by @info into the scratch memory
 * of the CPU at @cpu_base.  Zero-fills the whole scratch area, halts
 * the CPU, then copies the image words; the CPU is left halted for the
 * caller to start.  Returns 0 or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
                                 u32 cpu_scratch_base, int cpu_scratch_size,
                                 struct fw_info *info)
{
        int err, lock_err, i;
        void (*write_op)(struct tg3 *, u32, u32);

        if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
                netdev_err(tp->dev,
                           "%s: Trying to load TX cpu firmware which is 5705\n",
                           __func__);
                return -EINVAL;
        }

        /* 5705+ use direct memory writes; older parts go through the
         * indirect register interface.
         */
        if (tg3_flag(tp, 5705_PLUS))
                write_op = tg3_write_mem;
        else
                write_op = tg3_write_indirect_reg32;

        /* It is possible that bootcode is still loading at this point.
         * Get the nvram lock first before halting the cpu.
         */
        lock_err = tg3_nvram_lock(tp);
        err = tg3_halt_cpu(tp, cpu_base);
        if (!lock_err)
                tg3_nvram_unlock(tp);
        if (err)
                goto out;

        /* Clear the scratch window, then keep the CPU halted while the
         * image is written.
         */
        for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
                write_op(tp, cpu_scratch_base + i, 0);
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
        /* Copy the image; only the low 16 bits of fw_base offset into
         * the scratch window.
         */
        for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
                write_op(tp, (cpu_scratch_base +
                              (info->fw_base & 0xffff) +
                              (i * sizeof(u32))),
                              be32_to_cpu(info->fw_data[i]));

        err = 0;

out:
        return err;
}
3329
/* Load the 5701 A0 workaround firmware into both the RX and TX CPU
 * scratch areas, then start only the RX CPU.  Returns 0 on success or
 * a negative errno.  tp->lock is held.
 */
static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	int err, i;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	info.fw_len = tp->fw->size - 12;	/* strip the 3-word header */
	info.fw_data = &fw_data[3];

	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
				    &info);
	if (err)
		return err;

	/* Now startup only the RX cpu. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);

	/* Verify the PC write took; re-halt and retry up to 5 times. */
	for (i = 0; i < 5; i++) {
		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
			break;
		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
			   "should be %08x\n", __func__,
			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Clear the halt bit so the RX CPU starts running. */
	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);

	return 0;
}
3384
/* Load the software-TSO firmware into the appropriate on-chip CPU and
 * start it.  Chips with hardware TSO (HW_TSO_1/2/3) need no firmware
 * and return 0 immediately.  tp->lock is held.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
	struct fw_info info;
	const __be32 *fw_data;
	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
	int err, i;

	if (tg3_flag(tp, HW_TSO_1) ||
	    tg3_flag(tp, HW_TSO_2) ||
	    tg3_flag(tp, HW_TSO_3))
		return 0;

	fw_data = (void *)tp->fw->data;

	/* Firmware blob starts with version numbers, followed by
	   start address and length. We are setting complete length.
	   length = end_address_of_bss - start_address_of_text.
	   Remainder is the blob to be loaded contiguously
	   from start address. */

	info.fw_base = be32_to_cpu(fw_data[1]);
	cpu_scratch_size = tp->fw_len;
	info.fw_len = tp->fw->size - 12;	/* strip the 3-word header */
	info.fw_data = &fw_data[3];

	/* 5705 runs TSO on the RX CPU using the mbuf pool as scratch;
	 * all other chips use the TX CPU's dedicated scratch memory.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		cpu_base = RX_CPU_BASE;
		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
	} else {
		cpu_base = TX_CPU_BASE;
		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
	}

	err = tg3_load_firmware_cpu(tp, cpu_base,
				    cpu_scratch_base, cpu_scratch_size,
				    &info);
	if (err)
		return err;

	/* Now startup the cpu. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_PC, info.fw_base);

	/* Verify the PC write took; re-halt and retry up to 5 times. */
	for (i = 0; i < 5; i++) {
		if (tr32(cpu_base + CPU_PC) == info.fw_base)
			break;
		tw32(cpu_base + CPU_STATE, 0xffffffff);
		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
		tw32_f(cpu_base + CPU_PC, info.fw_base);
		udelay(1000);
	}
	if (i >= 5) {
		netdev_err(tp->dev,
			   "%s fails to set CPU PC, is %08x should be %08x\n",
			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
		return -ENODEV;
	}
	/* Clear the halt bit so the CPU starts running. */
	tw32(cpu_base + CPU_STATE, 0xffffffff);
	tw32_f(cpu_base + CPU_MODE,  0x00000000);
	return 0;
}
3448
3449
/* Program the hardware MAC address registers from tp->dev->dev_addr.
 * All four unicast address slots get the same address (slot 1 is
 * skipped when skip_mac_1 is set), and the TX backoff seed register is
 * derived from the address bytes.  tp->lock is held.
 */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	/* Split the 6-byte address into the 2-byte high and 4-byte low
	 * register halves.
	 */
	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		if (i == 1 && skip_mac_1)
			continue;	/* leave slot 1 untouched on request */
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	/* 5703/5704 have twelve extended address slots; mirror the
	 * address into those as well.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed the TX backoff generator with the masked byte-sum of the
	 * MAC address.
	 */
	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
3486
3487 static void tg3_enable_register_access(struct tg3 *tp)
3488 {
3489         /*
3490          * Make sure register accesses (indirect or otherwise) will function
3491          * correctly.
3492          */
3493         pci_write_config_dword(tp->pdev,
3494                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3495 }
3496
3497 static int tg3_power_up(struct tg3 *tp)
3498 {
3499         int err;
3500
3501         tg3_enable_register_access(tp);
3502
3503         err = pci_set_power_state(tp->pdev, PCI_D0);
3504         if (!err) {
3505                 /* Switch out of Vaux if it is a NIC */
3506                 tg3_pwrsrc_switch_to_vmain(tp);
3507         } else {
3508                 netdev_err(tp->dev, "Transition to D0 failed\n");
3509         }
3510
3511         return err;
3512 }
3513
3514 static int tg3_setup_phy(struct tg3 *, int);
3515
/* Prepare the chip for a transition to a low-power state: record the
 * current link settings, drop the PHY to a low-power configuration,
 * arm Wake-on-LAN in the MAC if requested, shut down unneeded clocks,
 * and notify firmware of the shutdown.  Always returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask PCI interrupts while the device is being powered down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* phylib path: save the current link parameters and
		 * restart autoneg with a reduced advertisement mask.
		 */
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save settings so they can be restored on resume. */
			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Only certain Broadcom PHY families take the
			 * explicit low-power programming below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		/* Save settings so they can be restored on resume. */
		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		/* Copper PHYs: renegotiate down to 10/half for low power. */
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait up to ~200ms for the firmware status mailbox to
		 * report the expected magic before proceeding.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	/* Tell firmware (via the WOL mailbox) that the driver is shutting
	 * down with magic-packet wake enabled.
	 */
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		/* Configure the MAC port mode so magic-packet reception
		 * keeps working while the host sleeps.
		 */
		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Shut down or slow the core clocks, as far as the chip family
	 * and ASF/WOL configuration allow.
	 */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* Apply the clock changes in two steps, 40us apart. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* With no wake source and no ASF, the PHY can be fully powered
	 * down.
	 */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp, true);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
3761
/* Fully power the device down: run the shutdown preparation sequence,
 * arm PCI wake if WOL is enabled, then enter D3hot.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
3769
3770 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3771 {
3772         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3773         case MII_TG3_AUX_STAT_10HALF:
3774                 *speed = SPEED_10;
3775                 *duplex = DUPLEX_HALF;
3776                 break;
3777
3778         case MII_TG3_AUX_STAT_10FULL:
3779                 *speed = SPEED_10;
3780                 *duplex = DUPLEX_FULL;
3781                 break;
3782
3783         case MII_TG3_AUX_STAT_100HALF:
3784                 *speed = SPEED_100;
3785                 *duplex = DUPLEX_HALF;
3786                 break;
3787
3788         case MII_TG3_AUX_STAT_100FULL:
3789                 *speed = SPEED_100;
3790                 *duplex = DUPLEX_FULL;
3791                 break;
3792
3793         case MII_TG3_AUX_STAT_1000HALF:
3794                 *speed = SPEED_1000;
3795                 *duplex = DUPLEX_HALF;
3796                 break;
3797
3798         case MII_TG3_AUX_STAT_1000FULL:
3799                 *speed = SPEED_1000;
3800                 *duplex = DUPLEX_FULL;
3801                 break;
3802
3803         default:
3804                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3805                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3806                                  SPEED_10;
3807                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3808                                   DUPLEX_HALF;
3809                         break;
3810                 }
3811                 *speed = SPEED_INVALID;
3812                 *duplex = DUPLEX_INVALID;
3813                 break;
3814         }
3815 }
3816
/* Program the PHY autoneg advertisement registers (MII_ADVERTISE and,
 * for gigabit-capable PHYs, MII_CTRL1000) from ethtool-style advertise
 * and flow-control masks, then set up EEE advertisement on EEE-capable
 * PHYs.  Returns 0 or the error from the first failing PHY access.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	new_adv = ADVERTISE_CSMA;
	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
	new_adv |= mii_advertise_flowctrl(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);

		/* 5701 A0/B0: request master role in 1000BASE-T
		 * negotiation (chip-specific workaround).
		 */
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;

		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
		if (err)
			goto done;
	}

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI while the EEE advertisement is reprogrammed. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
		if (err)
			val = 0;

		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
		case ASIC_REV_57766:
		case ASIC_REV_5719:
			/* If we advertised any eee advertisements above... */
			if (val)
				val = MII_TG3_DSP_TAP26_ALNOKO |
				      MII_TG3_DSP_TAP26_RMRXSTO |
				      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
			/* Fall through */
		case ASIC_REV_5720:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
		}

		/* Preserve an earlier error over the disable status. */
		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
3889
/* Kick off link bring-up on a copper PHY according to tp->link_config:
 * in low-power mode only 10Mb (and optionally 100Mb for WoL) is
 * advertised; with no specific speed requested full autoneg runs; with
 * autoneg disabled and a specific speed set, BMCR is forced directly.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Low-power mode: advertise 10Mb only, plus 100Mb when
		 * WOL_SPEED_100MB is set.
		 */
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise the configured
		 * set, minus gigabit on 10/100-only PHYs.
		 */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		/* Build the forced-mode BMCR value. */
		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Put the PHY in loopback and wait (up to ~15ms)
			 * for link to drop before applying the new BMCR.
			 * BMSR is read twice per iteration — presumably
			 * because link status is latched; confirm against
			 * the MII spec.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		/* Autoneg path: (re)start negotiation. */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
3983
/* One-time DSP setup for the BCM5401 PHY: disable tap power management
 * and set the extended packet length bit, then program a fixed set of
 * DSP registers.  Returns 0 on success (errors from the individual
 * writes are OR-ed together).
 */
static int tg3_init_5401phy_dsp(struct tg3 *tp)
{
	int err;

	/* Turn off tap power management. */
	/* Set Extended packet length bit */
	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);

	/* Fixed DSP register/value pairs.  NOTE(review): 0x8006 is
	 * written twice (0x0132 then 0x0232); this matches the original
	 * sequence — confirm it is intentional before changing.
	 */
	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);

	udelay(40);

	return err;
}
4002
4003 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4004 {
4005         u32 advmsk, tgtadv, advertising;
4006
4007         advertising = tp->link_config.advertising;
4008         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4009
4010         advmsk = ADVERTISE_ALL;
4011         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4012                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4013                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4014         }
4015
4016         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4017                 return false;
4018
4019         if ((*lcladv & advmsk) != tgtadv)
4020                 return false;
4021
4022         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4023                 u32 tg3_ctrl;
4024
4025                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4026
4027                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4028                         return false;
4029
4030                 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4031                 if (tg3_ctrl != tgtadv)
4032                         return false;
4033         }
4034
4035         return true;
4036 }
4037
4038 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4039 {
4040         u32 lpeth = 0;
4041
4042         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4043                 u32 val;
4044
4045                 if (tg3_readphy(tp, MII_STAT1000, &val))
4046                         return false;
4047
4048                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4049         }
4050
4051         if (tg3_readphy(tp, MII_LPA, rmtadv))
4052                 return false;
4053
4054         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4055         tp->link_config.rmt_adv = lpeth;
4056
4057         return true;
4058 }
4059
/* Evaluate and (re)establish the link on a copper (twisted-pair) PHY,
 * then program the MAC port mode, duplex and flow control to match.
 * @force_reset requests an unconditional PHY reset before the link is
 * probed.  Carrier state changes are propagated to the stack via
 * netif_carrier_on/off() and tg3_link_report().
 *
 * Returns 0 on success, or a negative error from the 5401 PHY DSP
 * init / reset helpers.  Register access ordering and the udelay()
 * calls below follow hardware requirements — do not reorder.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up;
        u32 bmsr, val;
        u32 lcl_adv, rmt_adv;
        u16 current_speed;
        u8 current_duplex;
        int i, err;

        /* Mask MAC link attentions and ack any latched status bits. */
        tw32(MAC_EVENT, 0);

        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        /* MI auto-polling would race with the manual MDIO accesses
         * below; disable it for the duration of this function.
         */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

        /* Some third-party PHYs need to be reset on link going
         * down.
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
            netif_carrier_ok(tp->dev)) {
                /* BMSR link status is latched; read twice so the second
                 * read reflects the current link state.
                 */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    !(bmsr & BMSR_LSTATUS))
                        force_reset = 1;
        }
        if (force_reset)
                tg3_phy_reset(tp);

        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
                    !tg3_flag(tp, INIT_COMPLETE))
                        bmsr = 0;

                /* 5401 needs its DSP reprogrammed whenever the link is
                 * found down.
                 */
                if (!(bmsr & BMSR_LSTATUS)) {
                        err = tg3_init_5401phy_dsp(tp);
                        if (err)
                                return err;

                        /* Poll up to ~10ms for the link to return. */
                        tg3_readphy(tp, MII_BMSR, &bmsr);
                        for (i = 0; i < 1000; i++) {
                                udelay(10);
                                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                                    (bmsr & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }

                        /* 5401 rev B0 coming off a gigabit link needs a
                         * full reset plus a second DSP init pass.
                         */
                        if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
                            TG3_PHY_REV_BCM5401_B0 &&
                            !(bmsr & BMSR_LSTATUS) &&
                            tp->link_config.active_speed == SPEED_1000) {
                                err = tg3_phy_reset(tp);
                                if (!err)
                                        err = tg3_init_5401phy_dsp(tp);
                                if (err)
                                        return err;
                        }
                }
        } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
        }

        /* Clear pending interrupts... */
        tg3_readphy(tp, MII_TG3_ISTAT, &val);
        tg3_readphy(tp, MII_TG3_ISTAT, &val);

        /* Unmask only link-change interrupts when using MI interrupts;
         * otherwise mask everything (FET PHYs have no such register).
         */
        if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
        else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
                tg3_writephy(tp, MII_TG3_IMASK, ~0);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                else
                        tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
        }

        /* Assume link down until proven otherwise. */
        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;
        tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
        tp->link_config.rmt_adv = 0;

        if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                          &val);
                /* NOTE(review): bit 10 here looks like an undocumented
                 * misc-test enable for capacitively coupled links; if it
                 * was clear, set it and restart link bring-up.
                 */
                if (!err && !(val & (1 << 10))) {
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                             val | (1 << 10));
                        goto relink;
                }
        }

        /* Poll (latched) BMSR for link, up to ~4ms. */
        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        break;
                udelay(40);
        }

        if (bmsr & BMSR_LSTATUS) {
                u32 aux_stat, bmcr;

                /* Wait for the aux status register to report a valid
                 * (non-zero) speed/duplex indication.
                 */
                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
                        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
                            aux_stat)
                                break;
                }

                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                             &current_speed,
                                             &current_duplex);

                /* Re-read BMCR until it returns a stable, plausible
                 * value (0x7fff indicates a bad read).
                 */
                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
                                continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                lcl_adv = 0;
                rmt_adv = 0;

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;

                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        /* Autoneg link is only "up" if the PHY's
                         * advertisement matches what we asked for and
                         * the partner abilities could be fetched.
                         */
                        if ((bmcr & BMCR_ANENABLE) &&
                            tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
                            tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
                                current_link_up = 1;
                } else {
                        /* Forced mode: PHY must match the requested
                         * speed/duplex/flowctrl exactly.
                         */
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
                            tp->link_config.duplex == current_duplex &&
                            tp->link_config.flowctrl ==
                            tp->link_config.active_flowctrl) {
                                current_link_up = 1;
                        }
                }

                if (current_link_up == 1 &&
                    tp->link_config.active_duplex == DUPLEX_FULL) {
                        u32 reg, bit;

                        /* MDI-X status lives in different registers on
                         * FET vs. standard PHYs.
                         */
                        if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                                reg = MII_TG3_FET_GEN_STAT;
                                bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
                        } else {
                                reg = MII_TG3_EXT_STAT;
                                bit = MII_TG3_EXT_STAT_MDIX;
                        }

                        if (!tg3_readphy(tp, reg, &val) && (val & bit))
                                tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;

                        tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
                }
        }

relink:
        /* Link down (or low-power recovery): restart negotiation and
         * take one more look at the link bit.
         */
        if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                tg3_phy_copper_begin(tp);

                tg3_readphy(tp, MII_BMSR, &bmsr);
                if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
                    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
                        current_link_up = 1;
        }

        /* Select MAC port mode: MII for 10/100, GMII for gigabit.  With
         * no link, FET parts default to MII and the rest to GMII.
         */
        tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
        if (current_link_up == 1) {
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
        else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                if (current_link_up == 1 &&
                    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
                else
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        }

        /* ??? Without this setting Netgear GA302T PHY does not
         * ??? send/receive packets...
         */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
            tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tg3_phy_eee_adjust(tp, current_link_up);

        if (tg3_flag(tp, USE_LINKCHG_REG)) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
                tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        }
        udelay(40);

        /* 5700 at gigabit on a PCI-X/high-speed bus: re-ack status and
         * notify firmware via its mailbox.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
            current_link_up == 1 &&
            tp->link_config.active_speed == SPEED_1000 &&
            (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
                udelay(120);
                tw32_f(MAC_STATUS,
                     (MAC_STATUS_SYNC_CHANGED |
                      MAC_STATUS_CFG_CHANGED));
                udelay(40);
                tg3_write_mem(tp,
                              NIC_SRAM_FIRMWARE_MBOX,
                              NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
        }

        /* Prevent send BD corruption. */
        if (tg3_flag(tp, CLKREQ_BUG)) {
                u16 oldlnkctl, newlnkctl;

                /* Disable PCIe CLKREQ at 10/100 speeds, enable it at
                 * gigabit; only write the register when it changes.
                 */
                pci_read_config_word(tp->pdev,
                                     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
                                     &oldlnkctl);
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
                else
                        newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
                if (newlnkctl != oldlnkctl)
                        pci_write_config_word(tp->pdev,
                                              pci_pcie_cap(tp->pdev) +
                                              PCI_EXP_LNKCTL, newlnkctl);
        }

        /* Tell the stack about carrier transitions and log the result. */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        return 0;
}
4351
/* Software state for the 1000BASE-X auto-negotiation state machine
 * driven by tg3_fiber_aneg_smachine().  The MR_* flag names follow the
 * management-register variable names of the IEEE 802.3 clause 37
 * arbitration description.
 */
struct tg3_fiber_aneginfo {
        int state;              /* one of the ANEG_STATE_* values below */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;              /* MR_* control and link-partner bits */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Timestamps in state-machine ticks (one tick per invocation). */
        unsigned long link_time, cur_time;

        /* Last received config word and how many consecutive times the
         * same value has been seen (ability match detection).
         */
        u32 ability_match_cfg;
        int ability_match_count;

        char ability_match, idle_match, ack_match;

        /* Config words sent to / received from the link partner. */
        u32 txconfig, rxconfig;
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes of tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Ticks a state must remain stable before the machine advances. */
#define ANEG_STATE_SETTLE_TIME  10000
4415
/* Single step of the software 1000BASE-X auto-negotiation arbitration
 * state machine (IEEE 802.3 clause 37 style).  Called repeatedly from
 * fiber_autoneg() — roughly once per microsecond — with persistent
 * state in @ap.  Each call samples the received config word from the
 * MAC, updates the match detectors, and advances ap->state at most a
 * few steps (via the marked fallthroughs).
 *
 * Returns ANEG_OK to keep stepping, ANEG_TIMER_ENAB while waiting out
 * a settle interval, ANEG_DONE on completion, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        u16 flowctrl;
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        /* First invocation: start from a clean slate. */
        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        /* Sample the received config word and run the ability / ack /
         * idle match detectors.  "Ability match" means the same word
         * has been received more than once in a row.
         */
        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                /* No config word received: the partner is sending idle. */
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch (ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                /* Transmit an all-zero config word to restart. */
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                /* Hold the restart state for the settle time. */
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME)
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                else
                        ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Advertise full duplex plus the configured pause bits. */
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = ANEG_CFG_FD;
                flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
                if (flowctrl & ADVERTISE_1000XPAUSE)
                        ap->txconfig |= ANEG_CFG_PS1;
                if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                        ap->txconfig |= ANEG_CFG_PS2;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                /* Wait until the partner sends a stable non-zero word. */
                if (ap->ability_match != 0 && ap->rxconfig != 0)
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Acknowledge the partner's config word. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        /* Partner acked; its word (ack bit aside) must
                         * still match what was ability-matched earlier.
                         */
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        /* Partner restarted negotiation. */
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                /* Reserved bits set in the config word are fatal. */
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                /* Decode the partner's abilities into MR_LP_ADV_* flags. */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                ap->flags ^= (MR_TOGGLE_TX);
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                /* Next-page exchange is unimplemented;
                                 * only proceed if neither side needs it.
                                 */
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                /* Stop sending config words; switch to idle. */
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        }

        return ret;
}
4667
4668 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4669 {
4670         int res = 0;
4671         struct tg3_fiber_aneginfo aninfo;
4672         int status = ANEG_FAILED;
4673         unsigned int tick;
4674         u32 tmp;
4675
4676         tw32_f(MAC_TX_AUTO_NEG, 0);
4677
4678         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4679         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4680         udelay(40);
4681
4682         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4683         udelay(40);
4684
4685         memset(&aninfo, 0, sizeof(aninfo));
4686         aninfo.flags |= MR_AN_ENABLE;
4687         aninfo.state = ANEG_STATE_UNKNOWN;
4688         aninfo.cur_time = 0;
4689         tick = 0;
4690         while (++tick < 195000) {
4691                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4692                 if (status == ANEG_DONE || status == ANEG_FAILED)
4693                         break;
4694
4695                 udelay(1);
4696         }
4697
4698         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4699         tw32_f(MAC_MODE, tp->mac_mode);
4700         udelay(40);
4701
4702         *txflags = aninfo.txconfig;
4703         *rxflags = aninfo.flags;
4704
4705         if (status == ANEG_DONE &&
4706             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4707                              MR_LP_ADV_FULL_DUPLEX)))
4708                 res = 1;
4709
4710         return res;
4711 }
4712
/* One-time bring-up sequence for the BCM8002 fiber PHY.  Most register
 * numbers/values here are opaque vendor magic; the inline comments are
 * the only available documentation.  The write order and busy-wait
 * delays are part of the sequence — do not reorder.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if (tg3_flag(tp, INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
4762
4763 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4764 {
4765         u16 flowctrl;
4766         u32 sg_dig_ctrl, sg_dig_status;
4767         u32 serdes_cfg, expected_sg_dig_ctrl;
4768         int workaround, port_a;
4769         int current_link_up;
4770
4771         serdes_cfg = 0;
4772         expected_sg_dig_ctrl = 0;
4773         workaround = 0;
4774         port_a = 1;
4775         current_link_up = 0;
4776
4777         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4778             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4779                 workaround = 1;
4780                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4781                         port_a = 0;
4782
4783                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4784                 /* preserve bits 20-23 for voltage regulator */
4785                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4786         }
4787
4788         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4789
4790         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4791                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4792                         if (workaround) {
4793                                 u32 val = serdes_cfg;
4794
4795                                 if (port_a)
4796                                         val |= 0xc010000;
4797                                 else
4798                                         val |= 0x4010000;
4799                                 tw32_f(MAC_SERDES_CFG, val);
4800                         }
4801
4802                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4803                 }
4804                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4805                         tg3_setup_flow_control(tp, 0, 0);
4806                         current_link_up = 1;
4807                 }
4808                 goto out;
4809         }
4810
4811         /* Want auto-negotiation.  */
4812         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4813
4814         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4815         if (flowctrl & ADVERTISE_1000XPAUSE)
4816                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4817         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4818                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4819
4820         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4821                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4822                     tp->serdes_counter &&
4823                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4824                                     MAC_STATUS_RCVD_CFG)) ==
4825                      MAC_STATUS_PCS_SYNCED)) {
4826                         tp->serdes_counter--;
4827                         current_link_up = 1;
4828                         goto out;
4829                 }
4830 restart_autoneg:
4831                 if (workaround)
4832                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4833                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4834                 udelay(5);
4835                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4836
4837                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4838                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4839         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4840                                  MAC_STATUS_SIGNAL_DET)) {
4841                 sg_dig_status = tr32(SG_DIG_STATUS);
4842                 mac_status = tr32(MAC_STATUS);
4843
4844                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4845                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4846                         u32 local_adv = 0, remote_adv = 0;
4847
4848                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4849                                 local_adv |= ADVERTISE_1000XPAUSE;
4850                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4851                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4852
4853                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4854                                 remote_adv |= LPA_1000XPAUSE;
4855                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4856                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4857
4858                         tp->link_config.rmt_adv =
4859                                            mii_adv_to_ethtool_adv_x(remote_adv);
4860
4861                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4862                         current_link_up = 1;
4863                         tp->serdes_counter = 0;
4864                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4865                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4866                         if (tp->serdes_counter)
4867                                 tp->serdes_counter--;
4868                         else {
4869                                 if (workaround) {
4870                                         u32 val = serdes_cfg;
4871
4872                                         if (port_a)
4873                                                 val |= 0xc010000;
4874                                         else
4875                                                 val |= 0x4010000;
4876
4877                                         tw32_f(MAC_SERDES_CFG, val);
4878                                 }
4879
4880                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4881                                 udelay(40);
4882
4883                                 /* Link parallel detection - link is up */
4884                                 /* only if we have PCS_SYNC and not */
4885                                 /* receiving config code words */
4886                                 mac_status = tr32(MAC_STATUS);
4887                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4888                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4889                                         tg3_setup_flow_control(tp, 0, 0);
4890                                         current_link_up = 1;
4891                                         tp->phy_flags |=
4892                                                 TG3_PHYFLG_PARALLEL_DETECT;
4893                                         tp->serdes_counter =
4894                                                 SERDES_PARALLEL_DET_TIMEOUT;
4895                                 } else
4896                                         goto restart_autoneg;
4897                         }
4898                 }
4899         } else {
4900                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4901                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4902         }
4903
4904 out:
4905         return current_link_up;
4906 }
4907
/* Fiber link setup used when the SERDES hardware autoneg engine
 * (HW_AUTONEG) is not in use.  @mac_status is the MAC_STATUS value
 * sampled by the caller.  Returns 1 if the link should be considered
 * up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* Without PCS sync there is no link to bring up. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		/* Run the software 1000BASE-X autoneg state machine. */
		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			/* Map the negotiated pause bits into MII
			 * advertisement form for flow-control resolution.
			 */
			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config change events until they stop arriving
		 * (bounded at 30 iterations).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		/* Autoneg did not complete, but we have PCS sync and are
		 * not receiving config code words: treat the link as up
		 * (parallel detection, as in the hw-autoneg path above).
		 */
		mac_status = tr32(MAC_STATUS);
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Briefly assert SEND_CONFIGS, then restore normal MAC
		 * mode (NOTE(review): presumably transmits config words
		 * to the link partner - confirm against Broadcom docs).
		 */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4972
/* Top-level link setup for TBI fiber ports.  Chooses between the
 * SERDES hardware autoneg path and the software state-machine path,
 * then programs MAC state and LEDs and reports carrier transitions.
 * Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current link parameters so changes can be reported
	 * even when the carrier state does not flip.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: software autoneg, carrier already up, init done,
	 * and the MAC reports a clean synced link - nothing to redo.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC port into TBI mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	tp->link_config.rmt_adv = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear the link-change bit in the status block while keeping
	 * the "updated" flag set.
	 */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack change events until they quiesce (bounded at 100 tries). */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			/* Pulse SEND_CONFIGS once the autoneg grace
			 * period has expired (NOTE(review): presumably
			 * to re-trigger the partner - confirm).
			 */
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	if (current_link_up == 1) {
		/* TBI fiber links are always 1000 Mb/s full duplex. */
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report a carrier transition, or a parameter-only change. */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
5081
/* Link setup for SERDES ports managed through an MII register
 * interface (5714/5780-class parts).  Handles autoneg restart,
 * forced-mode configuration, parallel detection and the resulting MAC
 * duplex/flow-control programming.  Returns the OR of all PHY access
 * error codes.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	/* Clear any stale change indications before probing the link. */
	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;
	tp->link_config.rmt_adv = 0;

	/* BMSR link status is latched-low; read twice to obtain the
	 * current state (standard MII behavior).
	 */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		/* 5714: derive link status from MAC_TX_STATUS instead
		 * of the PHY's BMSR bit.
		 */
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, newadv;

		/* Rebuild the 1000BASE-X advertisement from the
		 * configured flow control and advertised modes.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				 ADVERTISE_1000XPAUSE |
				 ADVERTISE_1000XPSE_ASYM |
				 ADVERTISE_SLCT);

		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);

		/* Restart autoneg only when the advertisement changed
		 * or autoneg is not currently enabled; the outcome is
		 * picked up on a later invocation.
		 */
		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, newadv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		u32 new_bmcr;

		/* Forced mode: disable autoneg and set the requested
		 * duplex.
		 */
		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Advertise nothing and restart autoneg
				 * so the link drops before switching to
				 * forced mode.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched-low BMSR: read twice (see above). */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of our
			 * advertisement and the link partner's.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;

				tp->link_config.rmt_adv =
					   mii_adv_to_ethtool_adv_x(remote_adv);
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
5253
/* Manage 1000BASE-X parallel detection for MII-SERDES ports.  While
 * serdes_counter is nonzero, autoneg is still being given time to
 * finish and nothing is done.  Afterwards:
 *  - link down + autoneg enabled: if the PHY shows signal detect and
 *    no incoming config code words, force 1000FD via parallel detect;
 *  - link up via parallel detect: if config code words start
 *    arriving, re-enable autoneg.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* Read twice - NOTE(review): presumably the
			 * first read clears latched bits; confirm
			 * against the PHY documentation.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
5313
/* Common entry point for (re)configuring the link.  Dispatches to the
 * fiber, fiber-MII, or copper PHY setup routine based on phy_flags,
 * then applies post-link MAC fixups: 5784_AX clock prescaler, TX
 * lengths/slot time, statistics coalescing ticks, and the ASPM
 * workaround threshold.  Returns the PHY setup routine's error code.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		/* Pick the GRC prescaler matching the current MAC core
		 * clock rate reported by the CPMU.
		 */
		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	/* 5720: preserve the jumbo frame length and count-down fields. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Gigabit half duplex gets an extended slot time (0xff vs 32). */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	if (!tg3_flag(tp, 5705_PLUS)) {
		/* Only coalesce statistics while the link is up. */
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		/* Apply the configured L1 threshold when the link is
		 * down; saturate the mask when it is up.
		 */
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
5378
/* Return the irq_sync flag; nonzero while interrupt processing is
 * being synchronized.  NOTE(review): the set/clear protocol is not
 * visible in this chunk - confirm semantics at the callers.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
5383
5384 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5385 {
5386         int i;
5387
5388         dst = (u32 *)((u8 *)dst + off);
5389         for (i = 0; i < len; i += sizeof(u32))
5390                 *dst++ = tr32(off + i);
5391 }
5392
/* Fill @regs with the legacy (non-PCIe) register blocks for
 * tg3_dump_state().  Each tg3_rd32_loop() call places a (start,
 * length-in-bytes) register range at its natural offset in the
 * buffer; ranges are gated on the relevant hardware capability flags.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers exist only with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* The TX CPU block is only read on pre-5705_PLUS parts. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
5442
/* Dump chip registers and per-vector status-block/NAPI state to the
 * kernel log for debugging.  Uses GFP_ATOMIC for the scratch buffer
 * (NOTE(review): presumably callable from non-sleepable context -
 * confirm at the call sites).
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four registers per line, skipping all-zero groups
	 * (unread slots are zero thanks to kzalloc).
	 */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	/* Per-interrupt-vector hardware status block and NAPI state. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
5500
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* Recovery only makes sense when the reorder workaround is not
	 * already active and mailbox writes are direct.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Only flag the pending recovery here; the chip reset itself
	 * happens later from the workqueue.
	 */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
5522
5523 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5524 {
5525         /* Tell compiler to fetch tx indices from memory. */
5526         barrier();
5527         return tnapi->tx_pending -
5528                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5529 }
5530
5531 /* Tigon3 never reports partial packet sends.  So we do not
5532  * need special logic to handle SKBs that have not had all
5533  * of their frags sent yet, like SunGEM does.
5534  */
/* Reclaim completed tx descriptors from @tnapi's ring up to the
 * hardware consumer index: unmap DMA, free skbs, report byte-queue-limit
 * completions, and wake the tx queue if enough space was reclaimed.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
        u32 sw_idx = tnapi->tx_cons;
        struct netdev_queue *txq;
        int index = tnapi - tp->napi;
        unsigned int pkts_compl = 0, bytes_compl = 0;

        /* With TSS the tx vectors start at napi[1], so shift down to
         * obtain the netdev tx queue number.
         */
        if (tg3_flag(tp, ENABLE_TSS))
                index--;

        txq = netdev_get_tx_queue(tp->dev, index);

        while (sw_idx != hw_idx) {
                struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
                struct sk_buff *skb = ri->skb;
                int i, tx_bug = 0;

                /* The head slot of every packet must carry an skb; NULL
                 * here indicates a bogus (re-ordered) completion.
                 */
                if (unlikely(skb == NULL)) {
                        tg3_tx_recover(tp);
                        return;
                }

                pci_unmap_single(tp->pdev,
                                 dma_unmap_addr(ri, mapping),
                                 skb_headlen(skb),
                                 PCI_DMA_TODEVICE);

                ri->skb = NULL;

                /* Skip extra descriptors flagged ->fragmented; they
                 * carry no skb of their own (presumably split to
                 * satisfy hardware DMA constraints -- TODO confirm
                 * against the tx mapping path).
                 */
                while (ri->fragmented) {
                        ri->fragmented = false;
                        sw_idx = NEXT_TX(sw_idx);
                        ri = &tnapi->tx_buffers[sw_idx];
                }

                sw_idx = NEXT_TX(sw_idx);

                /* One ring entry per skb fragment. */
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        ri = &tnapi->tx_buffers[sw_idx];
                        /* A fragment slot owning an skb, or walking past
                         * the hw index, means the completion is bogus.
                         */
                        if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
                                tx_bug = 1;

                        pci_unmap_page(tp->pdev,
                                       dma_unmap_addr(ri, mapping),
                                       skb_frag_size(&skb_shinfo(skb)->frags[i]),
                                       PCI_DMA_TODEVICE);

                        while (ri->fragmented) {
                                ri->fragmented = false;
                                sw_idx = NEXT_TX(sw_idx);
                                ri = &tnapi->tx_buffers[sw_idx];
                        }

                        sw_idx = NEXT_TX(sw_idx);
                }

                pkts_compl++;
                bytes_compl += skb->len;

                dev_kfree_skb(skb);

                if (unlikely(tx_bug)) {
                        tg3_tx_recover(tp);
                        return;
                }
        }

        /* Byte queue limits accounting for this reclaim pass. */
        netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);

        tnapi->tx_cons = sw_idx;

        /* Need to make the tx_cons update visible to tg3_start_xmit()
         * before checking for netif_queue_stopped().  Without the
         * memory barrier, there is a small possibility that tg3_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        if (unlikely(netif_tx_queue_stopped(txq) &&
                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
                /* Re-check under the tx lock to avoid racing a
                 * concurrent queue stop in the xmit path.
                 */
                __netif_tx_lock(txq, smp_processor_id());
                if (netif_tx_queue_stopped(txq) &&
                    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }
}
5624
5625 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5626 {
5627         if (!ri->data)
5628                 return;
5629
5630         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5631                          map_sz, PCI_DMA_FROMDEVICE);
5632         kfree(ri->data);
5633         ri->data = NULL;
5634 }
5635
5636 /* Returns size of skb allocated or < 0 on error.
5637  *
5638  * We only need to fill in the address because the other members
5639  * of the RX descriptor are invariant, see tg3_init_rings.
5640  *
 * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5642  * posting buffers we only dirty the first cache line of the RX
5643  * descriptor (containing the address).  Whereas for the RX status
5644  * buffers the cpu only reads the last cacheline of the RX descriptor
5645  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5646  */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
                            u32 opaque_key, u32 dest_idx_unmasked)
{
        struct tg3_rx_buffer_desc *desc;
        struct ring_info *map;
        u8 *data;
        dma_addr_t mapping;
        int skb_size, data_size, dest_idx;

        /* Select descriptor, cpu-side bookkeeping slot and DMA size for
         * the requested producer ring (standard or jumbo).
         */
        switch (opaque_key) {
        case RXD_OPAQUE_RING_STD:
                dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
                desc = &tpr->rx_std[dest_idx];
                map = &tpr->rx_std_buffers[dest_idx];
                data_size = tp->rx_pkt_map_sz;
                break;

        case RXD_OPAQUE_RING_JUMBO:
                dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
                desc = &tpr->rx_jmb[dest_idx].std;
                map = &tpr->rx_jmb_buffers[dest_idx];
                data_size = TG3_RX_JMB_MAP_SZ;
                break;

        default:
                return -EINVAL;
        }

        /* Do not overwrite any of the map or rp information
         * until we are sure we can commit to a new buffer.
         *
         * Callers depend upon this behavior and assume that
         * we leave everything unchanged if we fail.
         */
        /* Size the allocation so build_skb() can place its
         * skb_shared_info after the DMA area.  GFP_ATOMIC because we
         * are called from the rx fast path (NAPI context -- TODO
         * confirm all callers).
         */
        skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
                   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        data = kmalloc(skb_size, GFP_ATOMIC);
        if (!data)
                return -ENOMEM;

        mapping = pci_map_single(tp->pdev,
                                 data + TG3_RX_OFFSET(tp),
                                 data_size,
                                 PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(tp->pdev, mapping)) {
                kfree(data);
                return -EIO;
        }

        /* Commit point: both the buffer and its mapping succeeded. */
        map->data = data;
        dma_unmap_addr_set(map, mapping, mapping);

        /* Only the address words of the descriptor ever change; the
         * rest is invariant (see the header comment above).
         */
        desc->addr_hi = ((u64)mapping >> 32);
        desc->addr_lo = ((u64)mapping & 0xffffffff);

        return data_size;
}
5704
5705 /* We only need to move over in the address because the other
5706  * members of the RX descriptor are invariant.  See notes above
5707  * tg3_alloc_rx_data for full details.
5708  */
/* Return an unconsumed rx buffer to the producer ring: move the buffer
 * at @spr[src_idx] into @dpr[dest_idx_unmasked & mask] of the same ring
 * type, transferring both the descriptor's DMA address and the cpu-side
 * bookkeeping entry.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
                           struct tg3_rx_prodring_set *dpr,
                           u32 opaque_key, int src_idx,
                           u32 dest_idx_unmasked)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_rx_buffer_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        /* Source is always the master producer ring on napi[0]. */
        struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
        int dest_idx;

        switch (opaque_key) {
        case RXD_OPAQUE_RING_STD:
                dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
                dest_desc = &dpr->rx_std[dest_idx];
                dest_map = &dpr->rx_std_buffers[dest_idx];
                src_desc = &spr->rx_std[src_idx];
                src_map = &spr->rx_std_buffers[src_idx];
                break;

        case RXD_OPAQUE_RING_JUMBO:
                dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
                dest_desc = &dpr->rx_jmb[dest_idx].std;
                dest_map = &dpr->rx_jmb_buffers[dest_idx];
                src_desc = &spr->rx_jmb[src_idx].std;
                src_map = &spr->rx_jmb_buffers[src_idx];
                break;

        default:
                /* Unknown ring cookie: silently ignore. */
                return;
        }

        dest_map->data = src_map->data;
        dma_unmap_addr_set(dest_map, mapping,
                           dma_unmap_addr(src_map, mapping));
        dest_desc->addr_hi = src_desc->addr_hi;
        dest_desc->addr_lo = src_desc->addr_lo;

        /* Ensure that the update to the skb happens after the physical
         * addresses have been transferred to the new BD location.
         */
        smp_wmb();

        src_map->data = NULL;
}
5754
5755 /* The RX ring scheme is composed of multiple rings which post fresh
5756  * buffers to the chip, and one special ring the chip uses to report
5757  * status back to the host.
5758  *
5759  * The special ring reports the status of received packets to the
5760  * host.  The chip does not write into the original descriptor the
5761  * RX buffer was obtained from.  The chip simply takes the original
5762  * descriptor as provided by the host, updates the status and length
5763  * field, then writes this into the next status ring entry.
5764  *
5765  * Each ring the host uses to post buffers to the chip is described
5766  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
5767  * it is first placed into the on-chip ram.  When the packet's length
5768  * is known, it walks down the TG3_BDINFO entries to select the ring.
5769  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5770  * which is within the range of the new packet's length is chosen.
5771  *
5772  * The "separate ring for rx status" scheme may sound queer, but it makes
5773  * sense from a cache coherency perspective.  If only the host writes
5774  * to the buffer post rings, and only the chip writes to the rx status
5775  * rings, then cache lines never move beyond shared-modified state.
5776  * If both the host and chip were to write into the same ring, cache line
5777  * eviction could occur since both entities want it in an exclusive state.
5778  */
/* Service up to @budget frames from @tnapi's rx return ring, handing
 * them to the stack via GRO.  Returns the number of frames received.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
        struct tg3 *tp = tnapi->tp;
        u32 work_mask, rx_std_posted = 0;
        u32 std_prod_idx, jmb_prod_idx;
        u32 sw_idx = tnapi->rx_rcb_ptr;
        u16 hw_idx;
        int received;
        struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

        hw_idx = *(tnapi->rx_rcb_prod_idx);
        /*
         * We need to order the read of hw_idx and the read of
         * the opaque cookie.
         */
        rmb();
        work_mask = 0;
        received = 0;
        /* Work on local shadow copies of the producer indices; they are
         * published (masked) to tpr/hardware after the loop.
         */
        std_prod_idx = tpr->rx_std_prod_idx;
        jmb_prod_idx = tpr->rx_jmb_prod_idx;
        while (sw_idx != hw_idx && budget > 0) {
                struct ring_info *ri;
                struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
                unsigned int len;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u32 opaque_key, desc_idx, *post_ptr;
                u8 *data;

                /* The opaque cookie identifies which producer ring and
                 * slot this completion's buffer came from.
                 */
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
                        dma_addr = dma_unmap_addr(ri, mapping);
                        data = ri->data;
                        post_ptr = &std_prod_idx;
                        rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
                        dma_addr = dma_unmap_addr(ri, mapping);
                        data = ri->data;
                        post_ptr = &jmb_prod_idx;
                } else
                        goto next_pkt_nopost;

                work_mask |= opaque_key;

                /* Errored frame: recycle the buffer back to the
                 * producer ring and count the drop.
                 */
                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
                drop_it:
                        tg3_recycle_rx(tnapi, tpr, opaque_key,
                                       desc_idx, *post_ptr);
                drop_it_no_recycle:
                        /* Other statistics kept track of by card. */
                        tp->rx_dropped++;
                        goto next_pkt;
                }

                prefetch(data + TG3_RX_OFFSET(tp));
                /* Hardware length includes the FCS; strip it. */
                len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
                      ETH_FCS_LEN;

                if (len > TG3_RX_COPY_THRESH(tp)) {
                        int skb_size;

                        /* Large frame: post a replacement buffer, then
                         * hand the received one to the stack zero-copy
                         * via build_skb().
                         */
                        skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
                                                    *post_ptr);
                        if (skb_size < 0)
                                goto drop_it;

                        pci_unmap_single(tp->pdev, dma_addr, skb_size,
                                         PCI_DMA_FROMDEVICE);

                        skb = build_skb(data);
                        if (!skb) {
                                kfree(data);
                                goto drop_it_no_recycle;
                        }
                        skb_reserve(skb, TG3_RX_OFFSET(tp));
                        /* Ensure that the update to the data happens
                         * after the usage of the old DMA mapping.
                         */
                        smp_wmb();

                        ri->data = NULL;

                } else {
                        /* Small frame: copy into a fresh skb and give
                         * the original buffer straight back to the ring.
                         */
                        tg3_recycle_rx(tnapi, tpr, opaque_key,
                                       desc_idx, *post_ptr);

                        skb = netdev_alloc_skb(tp->dev,
                                               len + TG3_RAW_IP_ALIGN);
                        if (skb == NULL)
                                goto drop_it_no_recycle;

                        skb_reserve(skb, TG3_RAW_IP_ALIGN);
                        pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                        memcpy(skb->data,
                               data + TG3_RX_OFFSET(tp),
                               len);
                        pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
                }

                skb_put(skb, len);
                /* Trust the hardware checksum only when offload is on
                 * and the reported csum is the all-ones "good" value.
                 */
                if ((tp->dev->features & NETIF_F_RXCSUM) &&
                    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                      >> RXD_TCPCSUM_SHIFT) == 0xffff))
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);

                skb->protocol = eth_type_trans(skb, tp->dev);

                /* Drop oversized frames unless VLAN tagged (the tag
                 * accounts for the extra length).
                 */
                if (len > (tp->dev->mtu + ETH_HLEN) &&
                    skb->protocol != htons(ETH_P_8021Q)) {
                        dev_kfree_skb(skb);
                        goto drop_it_no_recycle;
                }

                if (desc->type_flags & RXD_FLAG_VLAN &&
                    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
                        __vlan_hwaccel_put_tag(skb,
                                               desc->err_vlan & RXD_VLAN_MASK);

                napi_gro_receive(&tnapi->napi, skb);

                received++;
                budget--;

next_pkt:
                (*post_ptr)++;

                /* Replenish the std ring early if the chip is close to
                 * exhausting its posted buffers.
                 */
                if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
                        tpr->rx_std_prod_idx = std_prod_idx &
                                               tp->rx_std_ring_mask;
                        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
                                     tpr->rx_std_prod_idx);
                        work_mask &= ~RXD_OPAQUE_RING_STD;
                        rx_std_posted = 0;
                }
next_pkt_nopost:
                sw_idx++;
                sw_idx &= tp->rx_ret_ring_mask;

                /* Refresh hw_idx to see if there is new work */
                if (sw_idx == hw_idx) {
                        hw_idx = *(tnapi->rx_rcb_prod_idx);
                        rmb();
                }
        }

        /* ACK the status ring. */
        tnapi->rx_rcb_ptr = sw_idx;
        tw32_rx_mbox(tnapi->consmbox, sw_idx);

        /* Refill RX ring(s). */
        if (!tg3_flag(tp, ENABLE_RSS)) {
                /* Non-RSS: publish producer indices directly to the
                 * chip mailboxes.
                 */
                if (work_mask & RXD_OPAQUE_RING_STD) {
                        tpr->rx_std_prod_idx = std_prod_idx &
                                               tp->rx_std_ring_mask;
                        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
                                     tpr->rx_std_prod_idx);
                }
                if (work_mask & RXD_OPAQUE_RING_JUMBO) {
                        tpr->rx_jmb_prod_idx = jmb_prod_idx &
                                               tp->rx_jmb_ring_mask;
                        tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
                                     tpr->rx_jmb_prod_idx);
                }
                mmiowb();
        } else if (work_mask) {
                /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
                 * updated before the producer indices can be updated.
                 */
                smp_wmb();

                tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
                tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

                /* With RSS, napi[1] transfers refilled buffers to the
                 * hardware-visible ring (see tg3_poll_work).
                 */
                if (tnapi != &tp->napi[1])
                        napi_schedule(&tp->napi[1].napi);
        }

        return received;
}
5965
/* Handle link-change events reported through the status block, under
 * tp->lock.  Skipped entirely when link state is tracked via the link
 * change register or serdes polling instead.
 */
static void tg3_poll_link(struct tg3 *tp)
{
        /* handle link change and other phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                struct tg3_hw_status *sblk = tp->napi[0].hw_status;

                if (sblk->status & SD_STATUS_LINK_CHG) {
                        /* Clear only the link-change bit; preserve the
                         * rest of the status word.
                         */
                        sblk->status = SD_STATUS_UPDATED |
                                       (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
                        if (tg3_flag(tp, USE_PHYLIB)) {
                                /* phylib owns link management; just ack
                                 * the MAC status bits (flushed write,
                                 * then a 40us settle delay -- presumably
                                 * a hardware requirement, confirm
                                 * against chip documentation).
                                 */
                                tw32_f(MAC_STATUS,
                                     (MAC_STATUS_SYNC_CHANGED |
                                      MAC_STATUS_CFG_CHANGED |
                                      MAC_STATUS_MI_COMPLETION |
                                      MAC_STATUS_LNKSTATE_CHANGED));
                                udelay(40);
                        } else
                                tg3_setup_phy(tp, 0);
                        spin_unlock(&tp->lock);
                }
        }
}
5989
/* Transfer refilled rx buffers from a per-vector producer ring (@spr)
 * to the master producer ring (@dpr) that the hardware reads.  Moves
 * contiguous runs of entries (std ring first, then jumbo) until the
 * source is drained or a destination slot is still occupied.  Returns
 * 0 on success, -ENOSPC when the destination was (partially) full.
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
                                struct tg3_rx_prodring_set *dpr,
                                struct tg3_rx_prodring_set *spr)
{
        u32 si, di, cpycnt, src_prod_idx;
        int i, err = 0;

        while (1) {
                src_prod_idx = spr->rx_std_prod_idx;

                /* Make sure updates to the rx_std_buffers[] entries and the
                 * standard producer index are seen in the correct order.
                 */
                smp_rmb();

                if (spr->rx_std_cons_idx == src_prod_idx)
                        break;

                /* Copy up to the ring-wrap point in one chunk. */
                if (spr->rx_std_cons_idx < src_prod_idx)
                        cpycnt = src_prod_idx - spr->rx_std_cons_idx;
                else
                        cpycnt = tp->rx_std_ring_mask + 1 -
                                 spr->rx_std_cons_idx;

                /* ...and no further than the destination wrap point. */
                cpycnt = min(cpycnt,
                             tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

                si = spr->rx_std_cons_idx;
                di = dpr->rx_std_prod_idx;

                /* Shrink the copy if a destination slot still holds a
                 * buffer; report -ENOSPC so the caller can retry.
                 */
                for (i = di; i < di + cpycnt; i++) {
                        if (dpr->rx_std_buffers[i].data) {
                                cpycnt = i - di;
                                err = -ENOSPC;
                                break;
                        }
                }

                if (!cpycnt)
                        break;

                /* Ensure that updates to the rx_std_buffers ring and the
                 * shadowed hardware producer ring from tg3_recycle_skb() are
                 * ordered correctly WRT the skb check above.
                 */
                smp_rmb();

                memcpy(&dpr->rx_std_buffers[di],
                       &spr->rx_std_buffers[si],
                       cpycnt * sizeof(struct ring_info));

                /* Only the address words need copying; the rest of the
                 * descriptor is invariant.
                 */
                for (i = 0; i < cpycnt; i++, di++, si++) {
                        struct tg3_rx_buffer_desc *sbd, *dbd;
                        sbd = &spr->rx_std[si];
                        dbd = &dpr->rx_std[di];
                        dbd->addr_hi = sbd->addr_hi;
                        dbd->addr_lo = sbd->addr_lo;
                }

                spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
                                       tp->rx_std_ring_mask;
                dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
                                       tp->rx_std_ring_mask;
        }

        /* Same algorithm for the jumbo ring. */
        while (1) {
                src_prod_idx = spr->rx_jmb_prod_idx;

                /* Make sure updates to the rx_jmb_buffers[] entries and
                 * the jumbo producer index are seen in the correct order.
                 */
                smp_rmb();

                if (spr->rx_jmb_cons_idx == src_prod_idx)
                        break;

                if (spr->rx_jmb_cons_idx < src_prod_idx)
                        cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
                else
                        cpycnt = tp->rx_jmb_ring_mask + 1 -
                                 spr->rx_jmb_cons_idx;

                cpycnt = min(cpycnt,
                             tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

                si = spr->rx_jmb_cons_idx;
                di = dpr->rx_jmb_prod_idx;

                for (i = di; i < di + cpycnt; i++) {
                        if (dpr->rx_jmb_buffers[i].data) {
                                cpycnt = i - di;
                                err = -ENOSPC;
                                break;
                        }
                }

                if (!cpycnt)
                        break;

                /* Ensure that updates to the rx_jmb_buffers ring and the
                 * shadowed hardware producer ring from tg3_recycle_skb() are
                 * ordered correctly WRT the skb check above.
                 */
                smp_rmb();

                memcpy(&dpr->rx_jmb_buffers[di],
                       &spr->rx_jmb_buffers[si],
                       cpycnt * sizeof(struct ring_info));

                for (i = 0; i < cpycnt; i++, di++, si++) {
                        struct tg3_rx_buffer_desc *sbd, *dbd;
                        sbd = &spr->rx_jmb[si].std;
                        dbd = &dpr->rx_jmb[di].std;
                        dbd->addr_hi = sbd->addr_hi;
                        dbd->addr_lo = sbd->addr_lo;
                }

                spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
                                       tp->rx_jmb_ring_mask;
                dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
                                       tp->rx_jmb_ring_mask;
        }

        return err;
}
6115
/* Per-vector poll work: reclaim tx completions, service rx within the
 * NAPI budget and, on the RSS master vector (napi[1]), funnel buffers
 * refilled by the other vectors back into the hardware-visible producer
 * ring.  Returns the updated work_done count.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
        struct tg3 *tp = tnapi->tp;

        /* run TX completion thread */
        if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
                tg3_tx(tnapi);
                /* tg3_tx() hit a bogus completion; bail out and let the
                 * poll loop schedule the reset task.
                 */
                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        return work_done;
        }

        /* run RX thread, within the bounds set by NAPI.
         * All RX "locking" is done by ensuring outside
         * code synchronizes with tg3->napi.poll()
         */
        if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_done += tg3_rx(tnapi, budget - work_done);

        if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
                struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
                int i, err = 0;
                u32 std_prod_idx = dpr->rx_std_prod_idx;
                u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

                /* Collect refilled buffers from every other vector into
                 * the master (napi[0]) producer ring.
                 */
                for (i = 1; i < tp->irq_cnt; i++)
                        err |= tg3_rx_prodring_xfer(tp, dpr,
                                                    &tp->napi[i].prodring);

                /* Order the ring updates before the mailbox writes. */
                wmb();

                if (std_prod_idx != dpr->rx_std_prod_idx)
                        tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
                                     dpr->rx_std_prod_idx);

                if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
                        tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
                                     dpr->rx_jmb_prod_idx);

                mmiowb();

                /* A destination ring was full (-ENOSPC); kick the
                 * coalescing engine -- presumably to force another
                 * interrupt so the transfer is retried (verify).
                 */
                if (err)
                        tw32_f(HOSTCC_MODE, tp->coal_now);
        }

        return work_done;
}
6162
6163 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6164 {
6165         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6166                 schedule_work(&tp->reset_task);
6167 }
6168
/* Synchronously cancel any scheduled reset task, then clear the pending
 * flag so a future reset can be scheduled again.  Must not be called
 * from the reset task itself (cancel_work_sync() would deadlock).
 */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
{
        cancel_work_sync(&tp->reset_task);
        tg3_flag_clear(tp, RESET_TASK_PENDING);
}
6174
/* NAPI poll handler for the additional MSI-X vectors.  Handles rx/tx
 * only; link and error events are processed by tg3_poll() on vector 0.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
        struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
        struct tg3 *tp = tnapi->tp;
        int work_done = 0;
        struct tg3_hw_status *sblk = tnapi->hw_status;

        while (1) {
                work_done = tg3_poll_work(tnapi, work_done, budget);

                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        goto tx_recovery;

                if (unlikely(work_done >= budget))
                        break;

                /* tp->last_tag is used in tg3_int_reenable() below
                 * to tell the hw how much work has been processed,
                 * so we must read it before checking for more work.
                 */
                tnapi->last_tag = sblk->status_tag;
                tnapi->last_irq_tag = tnapi->last_tag;
                rmb();

                /* check for RX/TX work to do */
                if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
                           *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
                        napi_complete(napi);
                        /* Reenable interrupts. */
                        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
                        mmiowb();
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        napi_complete(napi);
        tg3_reset_task_schedule(tp);
        return work_done;
}
6218
/* Inspect the chip's error status registers after SD_STATUS_ERROR is
 * seen; if a genuine fault is latched, dump chip state and schedule a
 * reset.  Acts at most once until the ERROR_PROCESSED flag is cleared.
 */
static void tg3_process_error(struct tg3 *tp)
{
        u32 val;
        bool real_error = false;

        if (tg3_flag(tp, ERROR_PROCESSED))
                return;

        /* Check Flow Attention register */
        /* The MBUF low-water-mark bit alone is not treated as an error. */
        val = tr32(HOSTCC_FLOW_ATTN);
        if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
                netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
                real_error = true;
        }

        if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
                netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
                real_error = true;
        }

        /* || short-circuits: WDMAC_STATUS is only read when
         * RDMAC_STATUS reads clean.
         */
        if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
                netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
                real_error = true;
        }

        if (!real_error)
                return;

        tg3_dump_state(tp);

        tg3_flag_set(tp, ERROR_PROCESSED);
        tg3_reset_task_schedule(tp);
}
6252
/* NAPI poll handler for vector 0: in addition to rx/tx work it handles
 * chip error processing and link-change events.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
        struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
        struct tg3 *tp = tnapi->tp;
        int work_done = 0;
        struct tg3_hw_status *sblk = tnapi->hw_status;

        while (1) {
                if (sblk->status & SD_STATUS_ERROR)
                        tg3_process_error(tp);

                tg3_poll_link(tp);

                work_done = tg3_poll_work(tnapi, work_done, budget);

                if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
                        goto tx_recovery;

                if (unlikely(work_done >= budget))
                        break;

                if (tg3_flag(tp, TAGGED_STATUS)) {
                        /* tp->last_tag is used in tg3_int_reenable() below
                         * to tell the hw how much work has been processed,
                         * so we must read it before checking for more work.
                         */
                        tnapi->last_tag = sblk->status_tag;
                        tnapi->last_irq_tag = tnapi->last_tag;
                        rmb();
                } else
                        /* Untagged mode: just ack the updated bit. */
                        sblk->status &= ~SD_STATUS_UPDATED;

                if (likely(!tg3_has_work(tnapi))) {
                        napi_complete(napi);
                        tg3_int_reenable(tnapi);
                        break;
                }
        }

        return work_done;

tx_recovery:
        /* work_done is guaranteed to be less than budget. */
        napi_complete(napi);
        tg3_reset_task_schedule(tp);
        return work_done;
}
6300
6301 static void tg3_napi_disable(struct tg3 *tp)
6302 {
6303         int i;
6304
6305         for (i = tp->irq_cnt - 1; i >= 0; i--)
6306                 napi_disable(&tp->napi[i].napi);
6307 }
6308
6309 static void tg3_napi_enable(struct tg3 *tp)
6310 {
6311         int i;
6312
6313         for (i = 0; i < tp->irq_cnt; i++)
6314                 napi_enable(&tp->napi[i].napi);
6315 }
6316
6317 static void tg3_napi_init(struct tg3 *tp)
6318 {
6319         int i;
6320
6321         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6322         for (i = 1; i < tp->irq_cnt; i++)
6323                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6324 }
6325
6326 static void tg3_napi_fini(struct tg3 *tp)
6327 {
6328         int i;
6329
6330         for (i = 0; i < tp->irq_cnt; i++)
6331                 netif_napi_del(&tp->napi[i].napi);
6332 }
6333
/* Stop all TX activity: refresh trans_start so the TX watchdog does not
 * fire while the device is quiesced, disable NAPI polling, then disable
 * the TX queues.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
        tp->dev->trans_start = jiffies; /* prevent tx timeout */
        tg3_napi_disable(tp);
        netif_tx_disable(tp->dev);
}
6340
/* Resume TX activity after tg3_netif_stop(): wake the queues, re-enable
 * NAPI, and unmask chip interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
        /* NOTE: unconditional netif_tx_wake_all_queues is only
         * appropriate so long as all callers are assured to
         * have free tx slots (such as after tg3_init_hw)
         */
        netif_tx_wake_all_queues(tp->dev);

        tg3_napi_enable(tp);
        /* Mark the status block updated so the first poll sees work,
         * then unmask chip interrupts.
         */
        tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
        tg3_enable_ints(tp);
}
6353
/* Mark interrupt processing as quiesced and wait for any handlers still
 * running on other CPUs to finish.  Handlers observe tp->irq_sync (via
 * tg3_irq_sync()) and refuse to schedule NAPI while it is set.
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
        int i;

        BUG_ON(tp->irq_sync);

        tp->irq_sync = 1;
        /* Make irq_sync visible before waiting for in-flight handlers. */
        smp_mb();

        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);
}
6366
6367 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6368  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6369  * with as well.  Most of the time, this is not necessary except when
6370  * shutting down the device.
6371  */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
        spin_lock_bh(&tp->lock);
        /* Optionally also quiesce the IRQ handlers (see comment above). */
        if (irq_sync)
                tg3_irq_quiesce(tp);
}
6378
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
        spin_unlock_bh(&tp->lock);
}
6383
6384 /* One-shot MSI handler - Chip automatically disables interrupt
6385  * after sending MSI so driver doesn't have to do it.
6386  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;

        /* Warm the caches for the status block and the next RX
         * completion entry before the NAPI poll runs.
         */
        prefetch(tnapi->hw_status);
        if (tnapi->rx_rcb)
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        /* Do not schedule NAPI while an irq quiesce is in progress. */
        if (likely(!tg3_irq_sync(tp)))
                napi_schedule(&tnapi->napi);

        return IRQ_HANDLED;
}
6401
6402 /* MSI ISR - No need to check for interrupt sharing and no need to
6403  * flush status block and interrupt mailbox. PCI ordering rules
6404  * guarantee that MSI will arrive after the status block.
6405  */
6406 static irqreturn_t tg3_msi(int irq, void *dev_id)
6407 {
6408         struct tg3_napi *tnapi = dev_id;
6409         struct tg3 *tp = tnapi->tp;
6410
6411         prefetch(tnapi->hw_status);
6412         if (tnapi->rx_rcb)
6413                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6414         /*
6415          * Writing any value to intr-mbox-0 clears PCI INTA# and
6416          * chip-internal interrupt pending events.
6417          * Writing non-zero to intr-mbox-0 additional tells the
6418          * NIC to stop sending us irqs, engaging "in-intr-handler"
6419          * event coalescing.
6420          */
6421         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6422         if (likely(!tg3_irq_sync(tp)))
6423                 napi_schedule(&tnapi->napi);
6424
6425         return IRQ_RETVAL(1);
6426 }
6427
/* Legacy INTx interrupt handler (non-tagged status mode).  Determines
 * whether the interrupt is ours, masks further chip interrupts via the
 * interrupt mailbox, and schedules NAPI if the status block shows work.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tnapi))) {
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
                napi_schedule(&tnapi->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               0x00000000);
        }
out:
        return IRQ_RETVAL(handled);
}
6476
/* Legacy INTx interrupt handler for chips using tagged status mode:
 * "interrupt is ours" is detected by comparing the hardware status tag
 * against the last tag we acknowledged, rather than SD_STATUS_UPDATED.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additional tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

        /*
         * In a shared interrupt configuration, sometimes other devices'
         * interrupts will scream.  We record the current status tag here
         * so that the above check can report that the screaming interrupts
         * are unhandled.  Eventually they will be silenced.
         */
        tnapi->last_irq_tag = sblk->status_tag;

        if (tg3_irq_sync(tp))
                goto out;

        prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        napi_schedule(&tnapi->napi);

out:
        return IRQ_RETVAL(handled);
}
6528
6529 /* ISR for interrupt test */
6530 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6531 {
6532         struct tg3_napi *tnapi = dev_id;
6533         struct tg3 *tp = tnapi->tp;
6534         struct tg3_hw_status *sblk = tnapi->hw_status;
6535
6536         if ((sblk->status & SD_STATUS_UPDATED) ||
6537             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6538                 tg3_disable_ints(tp);
6539                 return IRQ_RETVAL(1);
6540         }
6541         return IRQ_RETVAL(0);
6542 }
6543
6544 #ifdef CONFIG_NET_POLL_CONTROLLER
6545 static void tg3_poll_controller(struct net_device *dev)
6546 {
6547         int i;
6548         struct tg3 *tp = netdev_priv(dev);
6549
6550         for (i = 0; i < tp->irq_cnt; i++)
6551                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6552 }
6553 #endif
6554
/* net_device watchdog callback: the stack detected a stalled TX queue.
 * Optionally log chip state, then defer a full reset to process context.
 */
static void tg3_tx_timeout(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_msg_tx_err(tp)) {
                netdev_err(dev, "transmit timed out, resetting\n");
                tg3_dump_state(tp);
        }

        tg3_reset_task_schedule(tp);
}
6566
6567 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
        u32 base = (u32) mapping & 0xffffffff;

        /* The buffer crosses a 4GB boundary when the low 32 bits of
         * base + len + 8 wrap around.  The base > 0xffffdcc0 pre-check
         * cheaply rejects most addresses; presumably 0x100000000 -
         * 0xffffdcc0 (9024) covers the largest jumbo frame plus the
         * 8-byte slack -- TODO confirm against the max frame size.
         */
        return (base > 0xffffdcc0) && (base + len + 8 < base);
}
6574
6575 /* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
                                          int len)
{
/* NOTE(review): CONFIG_HIGHMEM together with BITS_PER_LONG == 64 is an
 * unusual combination -- verify this guard expresses the intended
 * "addresses can exceed 40 bits" condition.
 */
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
        /* Affected chips cannot DMA to/from addresses above 40 bits. */
        if (tg3_flag(tp, 40BIT_DMA_BUG))
                return ((u64) mapping + len) > DMA_BIT_MASK(40);
        return 0;
#else
        return 0;
#endif
}
6587
6588 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6589                                  dma_addr_t mapping, u32 len, u32 flags,
6590                                  u32 mss, u32 vlan)
6591 {
6592         txbd->addr_hi = ((u64) mapping >> 32);
6593         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6594         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6595         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6596 }
6597
/* Queue the buffer described by (map, len) at ring position *entry,
 * splitting it into multiple descriptors when the chip's DMA limit
 * requires it.  *entry and *budget are advanced for each descriptor
 * consumed.  Returns true when a hardware DMA bug would be triggered,
 * in which case the caller must fall back to the copy workaround.
 */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
                            dma_addr_t map, u32 len, u32 flags,
                            u32 mss, u32 vlan)
{
        struct tg3 *tp = tnapi->tp;
        bool hwbug = false;

        /* Some chips cannot DMA segments of 8 bytes or less. */
        if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
                hwbug = true;

        if (tg3_4g_overflow_test(map, len))
                hwbug = true;

        if (tg3_40bit_overflow_test(tp, map, len))
                hwbug = true;

        if (tp->dma_limit) {
                u32 prvidx = *entry;
                u32 tmp_flag = flags & ~TXD_FLAG_END;
                /* Carve the buffer into dma_limit-sized chunks; only the
                 * final chunk may carry TXD_FLAG_END.
                 */
                while (len > tp->dma_limit && *budget) {
                        u32 frag_len = tp->dma_limit;
                        len -= tp->dma_limit;

                        /* Avoid the 8byte DMA problem */
                        if (len <= 8) {
                                len += tp->dma_limit / 2;
                                frag_len = tp->dma_limit / 2;
                        }

                        tnapi->tx_buffers[*entry].fragmented = true;

                        tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                                      frag_len, tmp_flag, mss, vlan);
                        *budget -= 1;
                        prvidx = *entry;
                        *entry = NEXT_TX(*entry);

                        map += frag_len;
                }

                if (len) {
                        if (*budget) {
                                tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                                              len, flags, mss, vlan);
                                *budget -= 1;
                                *entry = NEXT_TX(*entry);
                        } else {
                                /* Out of descriptors: clear the fragmented
                                 * mark on the last queued chunk so the
                                 * unmap path walks the ring correctly.
                                 */
                                hwbug = true;
                                tnapi->tx_buffers[prvidx].fragmented = false;
                        }
                }
        } else {
                tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
                              len, flags, mss, vlan);
                *entry = NEXT_TX(*entry);
        }

        return hwbug;
}
6657
/* Unmap the DMA mappings of the skb stored at ring position 'entry' and
 * of its first 'last' + 1 page fragments, clearing the stored skb
 * pointer.  Extra descriptors inserted by tg3_tx_frag_set() (marked
 * 'fragmented') are walked over.  The skb itself is not freed here.
 */
static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
{
        int i;
        struct sk_buff *skb;
        struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];

        skb = txb->skb;
        txb->skb = NULL;

        /* Linear (head) portion of the skb. */
        pci_unmap_single(tnapi->tp->pdev,
                         dma_unmap_addr(txb, mapping),
                         skb_headlen(skb),
                         PCI_DMA_TODEVICE);

        /* Skip the split descriptors used for the head, if any. */
        while (txb->fragmented) {
                txb->fragmented = false;
                entry = NEXT_TX(entry);
                txb = &tnapi->tx_buffers[entry];
        }

        for (i = 0; i <= last; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                entry = NEXT_TX(entry);
                txb = &tnapi->tx_buffers[entry];

                pci_unmap_page(tnapi->tp->pdev,
                               dma_unmap_addr(txb, mapping),
                               skb_frag_size(frag), PCI_DMA_TODEVICE);

                /* Skip split descriptors for this fragment as well. */
                while (txb->fragmented) {
                        txb->fragmented = false;
                        entry = NEXT_TX(entry);
                        txb = &tnapi->tx_buffers[entry];
                }
        }
}
6695
6696 /* Workaround 4GB and 40-bit hardware DMA bugs. */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
                                       struct sk_buff **pskb,
                                       u32 *entry, u32 *budget,
                                       u32 base_flags, u32 mss, u32 vlan)
{
        struct tg3 *tp = tnapi->tp;
        struct sk_buff *new_skb, *skb = *pskb;
        dma_addr_t new_addr = 0;
        int ret = 0;

        /* Linearize the skb into a freshly allocated buffer; on the
         * 5701 also realign the data to a 4-byte boundary.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                new_skb = skb_copy(skb, GFP_ATOMIC);
        else {
                int more_headroom = 4 - ((unsigned long)skb->data & 3);

                new_skb = skb_copy_expand(skb,
                                          skb_headroom(skb) + more_headroom,
                                          skb_tailroom(skb), GFP_ATOMIC);
        }

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure the mapping succeeded */
                if (pci_dma_mapping_error(tp->pdev, new_addr)) {
                        dev_kfree_skb(new_skb);
                        ret = -1;
                } else {
                        u32 save_entry = *entry;

                        base_flags |= TXD_FLAG_END;

                        tnapi->tx_buffers[*entry].skb = new_skb;
                        dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
                                           mapping, new_addr);

                        if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
                                            new_skb->len, base_flags,
                                            mss, vlan)) {
                                /* Copy still hits a DMA bug: unmap, drop. */
                                tg3_tx_skb_unmap(tnapi, save_entry, -1);
                                dev_kfree_skb(new_skb);
                                ret = -1;
                        }
                }
        }

        /* The original skb is consumed in all cases; the caller
         * continues with new_skb (NULL on allocation failure).
         */
        dev_kfree_skb(skb);
        *pskb = new_skb;
        return ret;
}
6750
6751 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6752
6753 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6754  * TSO header is greater than 80 bytes.
6755  */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;
        u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
                netif_stop_queue(tp->dev);

                /* netif_stop_queue() must be done before checking
                 * tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
                        return NETDEV_TX_BUSY;

                netif_wake_queue(tp->dev);
        }

        /* Have the stack re-segment without TSO, then transmit each
         * resulting packet individually.
         */
        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs))
                goto tg3_tso_bug_end;

        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
6793
6794 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6795  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6796  */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss, vlan = 0;
        u32 budget;
        int i = -1, would_hit_hwbug;
        dma_addr_t mapping;
        struct tg3_napi *tnapi;
        struct netdev_queue *txq;
        unsigned int last;

        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
        /* With TSS, TX vectors start one past napi[0]. */
        if (tg3_flag(tp, ENABLE_TSS))
                tnapi++;

        budget = tg3_tx_avail(tnapi);

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_tx_queue_stopped(txq)) {
                        netif_tx_stop_queue(txq);

                        /* This is a hard error, log it. */
                        netdev_err(dev,
                                   "BUG! Tx Ring full when queue awake!\n");
                }
                return NETDEV_TX_BUSY;
        }

        entry = tnapi->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;

        mss = skb_shinfo(skb)->gso_size;
        if (mss) {
                struct iphdr *iph;
                u32 tcp_opt_len, hdr_len;

                /* Headers are modified below; make them writable first. */
                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
                        goto drop;

                iph = ip_hdr(skb);
                tcp_opt_len = tcp_optlen(skb);

                hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;

                if (!skb_is_gso_v6(skb)) {
                        iph->check = 0;
                        iph->tot_len = htons(mss + hdr_len);
                }

                /* Chips with the TSO bug cannot handle a TSO header
                 * larger than 80 bytes; fall back to software GSO.
                 */
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                    tg3_flag(tp, TSO_BUG))
                        return tg3_tso_bug(tp, skb);

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* Encode the header length into mss/base_flags in the
                 * format each HW TSO generation expects.
                 */
                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                        if (hdr_len & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }

        if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
            !mss && skb->len > VLAN_ETH_FRAME_LEN)
                base_flags |= TXD_FLAG_JMB_PKT;

        if (vlan_tx_tag_present(skb)) {
                base_flags |= TXD_FLAG_VLAN;
                vlan = vlan_tx_tag_get(skb);
        }

        len = skb_headlen(skb);

        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, mapping))
                goto drop;


        tnapi->tx_buffers[entry].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

        would_hit_hwbug = 0;

        if (tg3_flag(tp, 5701_DMA_BUG))
                would_hit_hwbug = 1;

        /* Queue the linear part; TXD_FLAG_END only when no page
         * fragments follow.
         */
        if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
                          ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
                            mss, vlan)) {
                would_hit_hwbug = 1;
        } else if (skb_shinfo(skb)->nr_frags > 0) {
                u32 tmp_mss = mss;

                if (!tg3_flag(tp, HW_TSO_1) &&
                    !tg3_flag(tp, HW_TSO_2) &&
                    !tg3_flag(tp, HW_TSO_3))
                        tmp_mss = 0;

                /* Now loop through additional data
                 * fragments, and queue them.
                 */
                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = skb_frag_size(frag);
                        mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
                                                   len, DMA_TO_DEVICE);

                        tnapi->tx_buffers[entry].skb = NULL;
                        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
                                           mapping);
                        if (dma_mapping_error(&tp->pdev->dev, mapping))
                                goto dma_error;

                        if (!budget ||
                            tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
                                            len, base_flags |
                                            ((i == last) ? TXD_FLAG_END : 0),
                                            tmp_mss, vlan)) {
                                would_hit_hwbug = 1;
                                break;
                        }
                }
        }

        if (would_hit_hwbug) {
                tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                entry = tnapi->tx_prod;
                budget = tg3_tx_avail(tnapi);
                if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
                                                base_flags, mss, vlan))
                        goto drop_nofree;
        }

        skb_tx_timestamp(skb);
        netdev_sent_queue(tp->dev, skb->len);

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox(tnapi->prodmbox, entry);

        tnapi->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
                        netif_tx_wake_queue(txq);
        }

        mmiowb();
        return NETDEV_TX_OK;

dma_error:
        /* Fragment i failed to map: unmap the head plus fragments
         * 0..i-1, then drop the packet.
         */
        tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
        tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
drop:
        dev_kfree_skb(skb);
drop_nofree:
        tp->tx_dropped++;
        return NETDEV_TX_OK;
}
7007
/* Enable or disable internal MAC loopback by rewriting MAC_MODE. */
static void tg3_mac_loopback(struct tg3 *tp, bool enable)
{
        if (enable) {
                tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
                                  MAC_MODE_PORT_MODE_MASK);

                tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;

                if (!tg3_flag(tp, 5705_PLUS))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;

                /* 10/100-only PHYs use MII port mode, others GMII. */
                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else {
                tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;

                if (tg3_flag(tp, 5705_PLUS) ||
                    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        }

        tw32(MAC_MODE, tp->mac_mode);
        /* Let the MAC mode change settle. */
        udelay(40);
}
7035
/* Configure the PHY (and the MAC's port/duplex mode) for loopback at
 * the requested speed.  'extlpbk' selects external loopback via
 * tg3_phy_set_extloopbk(); otherwise BMCR_LOOPBACK is used.  Returns 0
 * on success or -EIO when external loopback setup fails.
 */
static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
{
        u32 val, bmcr, mac_mode, ptest = 0;

        tg3_phy_toggle_apd(tp, false);
        tg3_phy_toggle_automdix(tp, 0);

        if (extlpbk && tg3_phy_set_extloopbk(tp))
                return -EIO;

        /* Build the BMCR value: always full duplex; FET PHYs cap out
         * at 100 Mb/s, so 1000 requests degrade to 100 on them.
         */
        bmcr = BMCR_FULLDPLX;
        switch (speed) {
        case SPEED_10:
                break;
        case SPEED_100:
                bmcr |= BMCR_SPEED100;
                break;
        case SPEED_1000:
        default:
                if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
                        speed = SPEED_100;
                        bmcr |= BMCR_SPEED100;
                } else {
                        speed = SPEED_1000;
                        bmcr |= BMCR_SPEED1000;
                }
        }

        if (extlpbk) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
                        tg3_readphy(tp, MII_CTRL1000, &val);
                        val |= CTL1000_AS_MASTER |
                               CTL1000_ENABLE_MASTER;
                        tg3_writephy(tp, MII_CTRL1000, val);
                } else {
                        ptest = MII_TG3_FET_PTEST_TRIM_SEL |
                                MII_TG3_FET_PTEST_TRIM_2;
                        tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
                }
        } else
                bmcr |= BMCR_LOOPBACK;

        tg3_writephy(tp, MII_BMCR, bmcr);

        /* The write needs to be flushed for the FETs */
        if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tg3_readphy(tp, MII_BMCR, &bmcr);

        udelay(40);

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
                tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
                             MII_TG3_FET_PTEST_FRC_TX_LINK |
                             MII_TG3_FET_PTEST_FRC_TX_LOCK);

                /* The write needs to be flushed for the AC131 */
                tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
        }

        /* Reset to prevent losing 1st rx packet intermittently */
        if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
            tg3_flag(tp, 5780_CLASS)) {
                tw32_f(MAC_RX_MODE, RX_MODE_RESET);
                udelay(10);
                tw32_f(MAC_RX_MODE, tp->rx_mode);
        }

        /* Program the MAC port mode to match the loopback speed. */
        mac_mode = tp->mac_mode &
                   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
        if (speed == SPEED_1000)
                mac_mode |= MAC_MODE_PORT_MODE_GMII;
        else
                mac_mode |= MAC_MODE_PORT_MODE_MII;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;

                /* 5401/5411 PHYs need opposite link polarity settings. */
                if (masked_phy_id == TG3_PHY_ID_BCM5401)
                        mac_mode &= ~MAC_MODE_LINK_POLARITY;
                else if (masked_phy_id == TG3_PHY_ID_BCM5411)
                        mac_mode |= MAC_MODE_LINK_POLARITY;

                tg3_writephy(tp, MII_TG3_EXT_CTRL,
                             MII_TG3_EXT_CTRL_LNK3_LED_MODE);
        }

        tw32(MAC_MODE, mac_mode);
        udelay(40);

        return 0;
}
7128
7129 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7130 {
7131         struct tg3 *tp = netdev_priv(dev);
7132
7133         if (features & NETIF_F_LOOPBACK) {
7134                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7135                         return;
7136
7137                 spin_lock_bh(&tp->lock);
7138                 tg3_mac_loopback(tp, true);
7139                 netif_carrier_on(tp->dev);
7140                 spin_unlock_bh(&tp->lock);
7141                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7142         } else {
7143                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7144                         return;
7145
7146                 spin_lock_bh(&tp->lock);
7147                 tg3_mac_loopback(tp, false);
7148                 /* Force link status check */
7149                 tg3_setup_phy(tp, 1);
7150                 spin_unlock_bh(&tp->lock);
7151                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7152         }
7153 }
7154
7155 static netdev_features_t tg3_fix_features(struct net_device *dev,
7156         netdev_features_t features)
7157 {
7158         struct tg3 *tp = netdev_priv(dev);
7159
7160         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7161                 features &= ~NETIF_F_ALL_TSO;
7162
7163         return features;
7164 }
7165
7166 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7167 {
7168         netdev_features_t changed = dev->features ^ features;
7169
7170         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7171                 tg3_set_loopback(dev, features);
7172
7173         return 0;
7174 }
7175
7176 static void tg3_rx_prodring_free(struct tg3 *tp,
7177                                  struct tg3_rx_prodring_set *tpr)
7178 {
7179         int i;
7180
7181         if (tpr != &tp->napi[0].prodring) {
7182                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7183                      i = (i + 1) & tp->rx_std_ring_mask)
7184                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7185                                         tp->rx_pkt_map_sz);
7186
7187                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7188                         for (i = tpr->rx_jmb_cons_idx;
7189                              i != tpr->rx_jmb_prod_idx;
7190                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7191                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7192                                                 TG3_RX_JMB_MAP_SZ);
7193                         }
7194                 }
7195
7196                 return;
7197         }
7198
7199         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7200                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7201                                 tp->rx_pkt_map_sz);
7202
7203         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7204                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7205                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7206                                         TG3_RX_JMB_MAP_SZ);
7207         }
7208 }
7209
7210 /* Initialize rx rings for packet processing.
7211  *
7212  * The chip has been shut down and the driver detached from
7213  * the networking, so no interrupts or new tx packets will
7214  * end up in the driver.  tp->{tx,}lock are held and thus
7215  * we may not sleep.
7216  */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	/* Both producer rings start out empty. */
	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	/* Secondary (non-napi[0]) ring sets only need their buffer
	 * bookkeeping arrays cleared; the descriptor rings and fresh
	 * buffers below belong to the default set.
	 */
	if (tpr != &tp->napi[0].prodring) {
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips use jumbo-sized DMA buffers in the standard
	 * ring when the MTU exceeds the standard frame size.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			/* An empty ring is fatal; a partially filled one
			 * is usable at reduced capacity.
			 */
			if (i == 0)
				goto initfail;
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Same invariant setup as the standard ring, jumbo flavor. */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	/* Undo any partial buffer allocation before reporting failure. */
	tg3_rx_prodring_free(tp, tpr);
	return -ENOMEM;
}
7312
/* Tear down a producer ring set: buffer bookkeeping arrays and the
 * coherent DMA descriptor rings.  Safe on a partially initialized
 * set; each pointer is checked (kfree tolerates NULL) and cleared.
 */
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
7331
7332 static int tg3_rx_prodring_init(struct tg3 *tp,
7333                                 struct tg3_rx_prodring_set *tpr)
7334 {
7335         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7336                                       GFP_KERNEL);
7337         if (!tpr->rx_std_buffers)
7338                 return -ENOMEM;
7339
7340         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7341                                          TG3_RX_STD_RING_BYTES(tp),
7342                                          &tpr->rx_std_mapping,
7343                                          GFP_KERNEL);
7344         if (!tpr->rx_std)
7345                 goto err_out;
7346
7347         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7348                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7349                                               GFP_KERNEL);
7350                 if (!tpr->rx_jmb_buffers)
7351                         goto err_out;
7352
7353                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7354                                                  TG3_RX_JMB_RING_BYTES(tp),
7355                                                  &tpr->rx_jmb_mapping,
7356                                                  GFP_KERNEL);
7357                 if (!tpr->rx_jmb)
7358                         goto err_out;
7359         }
7360
7361         return 0;
7362
7363 err_out:
7364         tg3_rx_prodring_fini(tp, tpr);
7365         return -ENOMEM;
7366 }
7367
7368 /* Free up pending packets in all rx/tx rings.
7369  *
7370  * The chip has been shut down and the driver detached from
7371  * the networking, so no interrupts or new tx packets will
7372  * end up in the driver.  tp->{tx,}lock is not held and we are not
7373  * in an interrupt context and thus may sleep.
7374  */
7375 static void tg3_free_rings(struct tg3 *tp)
7376 {
7377         int i, j;
7378
7379         for (j = 0; j < tp->irq_cnt; j++) {
7380                 struct tg3_napi *tnapi = &tp->napi[j];
7381
7382                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7383
7384                 if (!tnapi->tx_buffers)
7385                         continue;
7386
7387                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7388                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7389
7390                         if (!skb)
7391                                 continue;
7392
7393                         tg3_tx_skb_unmap(tnapi, i,
7394                                          skb_shinfo(skb)->nr_frags - 1);
7395
7396                         dev_kfree_skb_any(skb);
7397                 }
7398         }
7399         netdev_reset_queue(tp->dev);
7400 }
7401
7402 /* Initialize tx/rx rings for packet processing.
7403  *
7404  * The chip has been shut down and the driver detached from
7405  * the networking, so no interrupts or new tx packets will
7406  * end up in the driver.  tp->{tx,}lock are held and thus
7407  * we may not sleep.
7408  */
7409 static int tg3_init_rings(struct tg3 *tp)
7410 {
7411         int i;
7412
7413         /* Free up all the SKBs. */
7414         tg3_free_rings(tp);
7415
7416         for (i = 0; i < tp->irq_cnt; i++) {
7417                 struct tg3_napi *tnapi = &tp->napi[i];
7418
7419                 tnapi->last_tag = 0;
7420                 tnapi->last_irq_tag = 0;
7421                 tnapi->hw_status->status = 0;
7422                 tnapi->hw_status->status_tag = 0;
7423                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7424
7425                 tnapi->tx_prod = 0;
7426                 tnapi->tx_cons = 0;
7427                 if (tnapi->tx_ring)
7428                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7429
7430                 tnapi->rx_rcb_ptr = 0;
7431                 if (tnapi->rx_rcb)
7432                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7433
7434                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7435                         tg3_free_rings(tp);
7436                         return -ENOMEM;
7437                 }
7438         }
7439
7440         return 0;
7441 }
7442
7443 /*
7444  * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.
7446  */
7447 static void tg3_free_consistent(struct tg3 *tp)
7448 {
7449         int i;
7450
7451         for (i = 0; i < tp->irq_cnt; i++) {
7452                 struct tg3_napi *tnapi = &tp->napi[i];
7453
7454                 if (tnapi->tx_ring) {
7455                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7456                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7457                         tnapi->tx_ring = NULL;
7458                 }
7459
7460                 kfree(tnapi->tx_buffers);
7461                 tnapi->tx_buffers = NULL;
7462
7463                 if (tnapi->rx_rcb) {
7464                         dma_free_coherent(&tp->pdev->dev,
7465                                           TG3_RX_RCB_RING_BYTES(tp),
7466                                           tnapi->rx_rcb,
7467                                           tnapi->rx_rcb_mapping);
7468                         tnapi->rx_rcb = NULL;
7469                 }
7470
7471                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7472
7473                 if (tnapi->hw_status) {
7474                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7475                                           tnapi->hw_status,
7476                                           tnapi->status_mapping);
7477                         tnapi->hw_status = NULL;
7478                 }
7479         }
7480
7481         if (tp->hw_stats) {
7482                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7483                                   tp->hw_stats, tp->stats_mapping);
7484                 tp->hw_stats = NULL;
7485         }
7486 }
7487
7488 /*
7489  * Must not be invoked with interrupt sources disabled and
 * the hardware shut down.  Can sleep.
7491  */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	/* Hardware statistics block, shared by all vectors. */
	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		/* Per-vector status block. */
		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(
					       sizeof(struct tg3_tx_ring_info) *
					       TG3_TX_RING_SIZE, GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							&tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		/* Per-vector rx return (completion) ring. */
		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	/* Release everything acquired before the failure point. */
	tg3_free_consistent(tp);
	return -ENOMEM;
}
7585
7586 #define MAX_WAIT_CNT 1000
7587
7588 /* To stop a block, clear the enable bit and poll till it
7589  * clears.  tp->lock is held.
7590  */
7591 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7592 {
7593         unsigned int i;
7594         u32 val;
7595
7596         if (tg3_flag(tp, 5705_PLUS)) {
7597                 switch (ofs) {
7598                 case RCVLSC_MODE:
7599                 case DMAC_MODE:
7600                 case MBFREE_MODE:
7601                 case BUFMGR_MODE:
7602                 case MEMARB_MODE:
7603                         /* We can't enable/disable these bits of the
7604                          * 5705/5750, just say success.
7605                          */
7606                         return 0;
7607
7608                 default:
7609                         break;
7610                 }
7611         }
7612
7613         val = tr32(ofs);
7614         val &= ~enable_bit;
7615         tw32_f(ofs, val);
7616
7617         for (i = 0; i < MAX_WAIT_CNT; i++) {
7618                 udelay(100);
7619                 val = tr32(ofs);
7620                 if ((val & enable_bit) == 0)
7621                         break;
7622         }
7623
7624         if (i == MAX_WAIT_CNT && !silent) {
7625                 dev_err(&tp->pdev->dev,
7626                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7627                         ofs, enable_bit);
7628                 return -ENODEV;
7629         }
7630
7631         return 0;
7632 }
7633
7634 /* tp->lock is held. */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting new rx traffic first. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Shut down the rx-side blocks; errors are OR-ed together so
	 * every block is attempted even if one times out.
	 */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Then the tx-side and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	/* Disable the MAC transmitter and poll for it to drain. */
	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the FTQ reset register. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear all per-vector status blocks now that hardware is idle. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}

	return err;
}
7697
7698 /* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* Only the PCI command register needs saving; chip reset can
	 * clear its memory-enable bit (see tg3_chip_reset()).
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
7703
7704 /* Restore PCI state after chip reset */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Restore the command register saved in tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	/* Conventional PCI: cache line size and latency timer are also
	 * lost across reset; PCIe devices do not use them.
	 */
	if (!tg3_flag(tp, PCI_EXPRESS)) {
		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
				      tp->pci_cacheline_sz);
		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			/* Also re-enable MSI mode in the chip itself. */
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
7764
7765 /* tp->lock is held. */
7766 static int tg3_chip_reset(struct tg3 *tp)
7767 {
7768         u32 val;
7769         void (*write_op)(struct tg3 *, u32, u32);
7770         int i, err;
7771
7772         tg3_nvram_lock(tp);
7773
7774         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7775
7776         /* No matching tg3_nvram_unlock() after this because
7777          * chip reset below will undo the nvram lock.
7778          */
7779         tp->nvram_lock_cnt = 0;
7780
7781         /* GRC_MISC_CFG core clock reset will clear the memory
7782          * enable bit in PCI register 4 and the MSI enable bit
7783          * on some chips, so we save relevant registers here.
7784          */
7785         tg3_save_pci_state(tp);
7786
7787         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7788             tg3_flag(tp, 5755_PLUS))
7789                 tw32(GRC_FASTBOOT_PC, 0);
7790
7791         /*
7792          * We must avoid the readl() that normally takes place.
7793          * It locks machines, causes machine checks, and other
7794          * fun things.  So, temporarily disable the 5701
7795          * hardware workaround, while we do the reset.
7796          */
7797         write_op = tp->write32;
7798         if (write_op == tg3_write_flush_reg32)
7799                 tp->write32 = tg3_write32;
7800
7801         /* Prevent the irq handler from reading or writing PCI registers
7802          * during chip reset when the memory enable bit in the PCI command
7803          * register may be cleared.  The chip does not generate interrupt
7804          * at this time, but the irq handler may still be called due to irq
7805          * sharing or irqpoll.
7806          */
7807         tg3_flag_set(tp, CHIP_RESETTING);
7808         for (i = 0; i < tp->irq_cnt; i++) {
7809                 struct tg3_napi *tnapi = &tp->napi[i];
7810                 if (tnapi->hw_status) {
7811                         tnapi->hw_status->status = 0;
7812                         tnapi->hw_status->status_tag = 0;
7813                 }
7814                 tnapi->last_tag = 0;
7815                 tnapi->last_irq_tag = 0;
7816         }
7817         smp_mb();
7818
7819         for (i = 0; i < tp->irq_cnt; i++)
7820                 synchronize_irq(tp->napi[i].irq_vec);
7821
7822         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7823                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7824                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7825         }
7826
7827         /* do the reset */
7828         val = GRC_MISC_CFG_CORECLK_RESET;
7829
7830         if (tg3_flag(tp, PCI_EXPRESS)) {
7831                 /* Force PCIe 1.0a mode */
7832                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7833                     !tg3_flag(tp, 57765_PLUS) &&
7834                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7835                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7836                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7837
7838                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7839                         tw32(GRC_MISC_CFG, (1 << 29));
7840                         val |= (1 << 29);
7841                 }
7842         }
7843
7844         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7845                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7846                 tw32(GRC_VCPU_EXT_CTRL,
7847                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7848         }
7849
7850         /* Manage gphy power for all CPMU absent PCIe devices. */
7851         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7852                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7853
7854         tw32(GRC_MISC_CFG, val);
7855
7856         /* restore 5701 hardware bug workaround write method */
7857         tp->write32 = write_op;
7858
7859         /* Unfortunately, we have to delay before the PCI read back.
7860          * Some 575X chips even will not respond to a PCI cfg access
7861          * when the reset command is given to the chip.
7862          *
7863          * How do these hardware designers expect things to work
7864          * properly if the PCI write is posted for a long period
7865          * of time?  It is always necessary to have some method by
7866          * which a register read back can occur to push the write
7867          * out which does the reset.
7868          *
7869          * For most tg3 variants the trick below was working.
7870          * Ho hum...
7871          */
7872         udelay(120);
7873
7874         /* Flush PCI posted writes.  The normal MMIO registers
7875          * are inaccessible at this time so this is the only
7876          * way to make this reliably (actually, this is no longer
7877          * the case, see above).  I tried to use indirect
7878          * register read/write but this upset some 5701 variants.
7879          */
7880         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7881
7882         udelay(120);
7883
7884         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7885                 u16 val16;
7886
7887                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7888                         int i;
7889                         u32 cfg_val;
7890
7891                         /* Wait for link training to complete.  */
7892                         for (i = 0; i < 5000; i++)
7893                                 udelay(100);
7894
7895                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7896                         pci_write_config_dword(tp->pdev, 0xc4,
7897                                                cfg_val | (1 << 15));
7898                 }
7899
7900                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7901                 pci_read_config_word(tp->pdev,
7902                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7903                                      &val16);
7904                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7905                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7906                 /*
7907                  * Older PCIe devices only support the 128 byte
7908                  * MPS setting.  Enforce the restriction.
7909                  */
7910                 if (!tg3_flag(tp, CPMU_PRESENT))
7911                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7912                 pci_write_config_word(tp->pdev,
7913                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7914                                       val16);
7915
7916                 /* Clear error status */
7917                 pci_write_config_word(tp->pdev,
7918                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7919                                       PCI_EXP_DEVSTA_CED |
7920                                       PCI_EXP_DEVSTA_NFED |
7921                                       PCI_EXP_DEVSTA_FED |
7922                                       PCI_EXP_DEVSTA_URD);
7923         }
7924
7925         tg3_restore_pci_state(tp);
7926
7927         tg3_flag_clear(tp, CHIP_RESETTING);
7928         tg3_flag_clear(tp, ERROR_PROCESSED);
7929
7930         val = 0;
7931         if (tg3_flag(tp, 5780_CLASS))
7932                 val = tr32(MEMARB_MODE);
7933         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7934
7935         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7936                 tg3_stop_fw(tp);
7937                 tw32(0x5000, 0x400);
7938         }
7939
7940         tw32(GRC_MODE, tp->grc_mode);
7941
7942         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7943                 val = tr32(0xc4);
7944
7945                 tw32(0xc4, val | (1 << 15));
7946         }
7947
7948         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7950                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7951                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7952                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7953                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7954         }
7955
7956         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7957                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7958                 val = tp->mac_mode;
7959         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7960                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7961                 val = tp->mac_mode;
7962         } else
7963                 val = 0;
7964
7965         tw32_f(MAC_MODE, val);
7966         udelay(40);
7967
7968         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7969
7970         err = tg3_poll_fw(tp);
7971         if (err)
7972                 return err;
7973
7974         tg3_mdio_start(tp);
7975
7976         if (tg3_flag(tp, PCI_EXPRESS) &&
7977             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7978             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7979             !tg3_flag(tp, 57765_PLUS)) {
7980                 val = tr32(0x7c00);
7981
7982                 tw32(0x7c00, val | (1 << 25));
7983         }
7984
7985         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7986                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7987                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7988         }
7989
7990         /* Reprobe ASF enable state.  */
7991         tg3_flag_clear(tp, ENABLE_ASF);
7992         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7993         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7994         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7995                 u32 nic_cfg;
7996
7997                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7998                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7999                         tg3_flag_set(tp, ENABLE_ASF);
8000                         tp->last_event_jiffies = jiffies;
8001                         if (tg3_flag(tp, 5750_PLUS))
8002                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8003                 }
8004         }
8005
8006         return 0;
8007 }
8008
8009 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
8010                                                  struct rtnl_link_stats64 *);
8011 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
8012                                                 struct tg3_ethtool_stats *);
8013
8014 /* tp->lock is held. */
8015 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8016 {
8017         int err;
8018
8019         tg3_stop_fw(tp);
8020
8021         tg3_write_sig_pre_reset(tp, kind);
8022
8023         tg3_abort_hw(tp, silent);
8024         err = tg3_chip_reset(tp);
8025
8026         __tg3_set_mac_addr(tp, 0);
8027
8028         tg3_write_sig_legacy(tp, kind);
8029         tg3_write_sig_post_reset(tp, kind);
8030
8031         if (tp->hw_stats) {
8032                 /* Save the stats across chip resets... */
8033                 tg3_get_stats64(tp->dev, &tp->net_stats_prev),
8034                 tg3_get_estats(tp, &tp->estats_prev);
8035
8036                 /* And make sure the next sample is new data */
8037                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8038         }
8039
8040         if (err)
8041                 return err;
8042
8043         return 0;
8044 }
8045
8046 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8047 {
8048         struct tg3 *tp = netdev_priv(dev);
8049         struct sockaddr *addr = p;
8050         int err = 0, skip_mac_1 = 0;
8051
8052         if (!is_valid_ether_addr(addr->sa_data))
8053                 return -EINVAL;
8054
8055         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8056
8057         if (!netif_running(dev))
8058                 return 0;
8059
8060         if (tg3_flag(tp, ENABLE_ASF)) {
8061                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8062
8063                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8064                 addr0_low = tr32(MAC_ADDR_0_LOW);
8065                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8066                 addr1_low = tr32(MAC_ADDR_1_LOW);
8067
8068                 /* Skip MAC addr 1 if ASF is using it. */
8069                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8070                     !(addr1_high == 0 && addr1_low == 0))
8071                         skip_mac_1 = 1;
8072         }
8073         spin_lock_bh(&tp->lock);
8074         __tg3_set_mac_addr(tp, skip_mac_1);
8075         spin_unlock_bh(&tp->lock);
8076
8077         return err;
8078 }
8079
8080 /* tp->lock is held. */
8081 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8082                            dma_addr_t mapping, u32 maxlen_flags,
8083                            u32 nic_addr)
8084 {
8085         tg3_write_mem(tp,
8086                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8087                       ((u64) mapping >> 32));
8088         tg3_write_mem(tp,
8089                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8090                       ((u64) mapping & 0xffffffff));
8091         tg3_write_mem(tp,
8092                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8093                        maxlen_flags);
8094
8095         if (!tg3_flag(tp, 5705_PLUS))
8096                 tg3_write_mem(tp,
8097                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8098                               nic_addr);
8099 }
8100
8101 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8102 {
8103         int i;
8104
8105         if (!tg3_flag(tp, ENABLE_TSS)) {
8106                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8107                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8108                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8109         } else {
8110                 tw32(HOSTCC_TXCOL_TICKS, 0);
8111                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8112                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8113         }
8114
8115         if (!tg3_flag(tp, ENABLE_RSS)) {
8116                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8117                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8118                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8119         } else {
8120                 tw32(HOSTCC_RXCOL_TICKS, 0);
8121                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8122                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8123         }
8124
8125         if (!tg3_flag(tp, 5705_PLUS)) {
8126                 u32 val = ec->stats_block_coalesce_usecs;
8127
8128                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8129                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8130
8131                 if (!netif_carrier_ok(tp->dev))
8132                         val = 0;
8133
8134                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8135         }
8136
8137         for (i = 0; i < tp->irq_cnt - 1; i++) {
8138                 u32 reg;
8139
8140                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8141                 tw32(reg, ec->rx_coalesce_usecs);
8142                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8143                 tw32(reg, ec->rx_max_coalesced_frames);
8144                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8145                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8146
8147                 if (tg3_flag(tp, ENABLE_TSS)) {
8148                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8149                         tw32(reg, ec->tx_coalesce_usecs);
8150                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8151                         tw32(reg, ec->tx_max_coalesced_frames);
8152                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8153                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8154                 }
8155         }
8156
8157         for (; i < tp->irq_max - 1; i++) {
8158                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8159                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8160                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8161
8162                 if (tg3_flag(tp, ENABLE_TSS)) {
8163                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8164                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8165                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8166                 }
8167         }
8168 }
8169
/* tp->lock is held.
 *
 * Return the ring machinery to a clean state: disable every NIC-SRAM send
 * and receive-return ring control block (RCB) beyond the first, reset the
 * per-vector mailbox registers and software bookkeeping, clear the status
 * blocks in host RAM, then re-program the status block DMA addresses and
 * BDINFO entries for each active interrupt vector.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
	int i;
	u32 stblk, txrcb, rxrcb, limit;
	struct tg3_napi *tnapi = &tp->napi[0];

	/* Disable all transmit rings but the first.  The number of send
	 * RCBs present in NIC SRAM varies with the chip family; 'limit'
	 * is one past the last RCB to disable.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
	else if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
	else if (tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
	else
		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);


	/* Disable all receive return rings but the first. */
	if (tg3_flag(tp, 5717_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
	else if (!tg3_flag(tp, 5705_PLUS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
		 tg3_flag(tp, 57765_CLASS))
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
	else
		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
			      BDINFO_FLAGS_DISABLED);

	/* Disable interrupts (writing 1 to the interrupt mailbox masks
	 * the vector) and reset vector 0's MSI-check bookkeeping.
	 */
	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
	tp->napi[0].chk_msi_cnt = 0;
	tp->napi[0].last_rx_cons = 0;
	tp->napi[0].last_tx_cons = 0;

	/* Zero mailbox registers. */
	if (tg3_flag(tp, SUPPORT_MSIX)) {
		for (i = 1; i < tp->irq_max; i++) {
			tp->napi[i].tx_prod = 0;
			tp->napi[i].tx_cons = 0;
			/* TX producer mailboxes exist per-vector only
			 * when TSS is enabled.
			 */
			if (tg3_flag(tp, ENABLE_TSS))
				tw32_mailbox(tp->napi[i].prodmbox, 0);
			tw32_rx_mbox(tp->napi[i].consmbox, 0);
			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
			tp->napi[i].chk_msi_cnt = 0;
			tp->napi[i].last_rx_cons = 0;
			tp->napi[i].last_tx_cons = 0;
		}
		/* Without TSS, all TX goes through vector 0's mailbox. */
		if (!tg3_flag(tp, ENABLE_TSS))
			tw32_mailbox(tp->napi[0].prodmbox, 0);
	} else {
		tp->napi[0].tx_prod = 0;
		tp->napi[0].tx_cons = 0;
		tw32_mailbox(tp->napi[0].prodmbox, 0);
		tw32_rx_mbox(tp->napi[0].consmbox, 0);
	}

	/* Make sure the NIC-based send BD rings are disabled. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
		for (i = 0; i < 16; i++)
			tw32_tx_mbox(mbox + i * 8, 0);
	}

	txrcb = NIC_SRAM_SEND_RCB;
	rxrcb = NIC_SRAM_RCV_RET_RCB;

	/* Clear status block in ram. */
	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

	/* Set status block DMA address */
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
	     ((u64) tnapi->status_mapping >> 32));
	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
	     ((u64) tnapi->status_mapping & 0xffffffff));

	/* Re-enable vector 0's rings via their BDINFO entries. */
	if (tnapi->tx_ring) {
		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
			       (TG3_TX_RING_SIZE <<
				BDINFO_FLAGS_MAXLEN_SHIFT),
			       NIC_SRAM_TX_BUFFER_DESC);
		txrcb += TG3_BDINFO_SIZE;
	}

	if (tnapi->rx_rcb) {
		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       (tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
		rxrcb += TG3_BDINFO_SIZE;
	}

	/* Remaining vectors: status block address registers start at
	 * HOSTCC_STATBLCK_RING1 and are 8 bytes apart.
	 */
	stblk = HOSTCC_STATBLCK_RING1;

	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
		u64 mapping = (u64)tnapi->status_mapping;
		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

		/* Clear status block in ram. */
		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

		if (tnapi->tx_ring) {
			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
				       (TG3_TX_RING_SIZE <<
					BDINFO_FLAGS_MAXLEN_SHIFT),
				       NIC_SRAM_TX_BUFFER_DESC);
			txrcb += TG3_BDINFO_SIZE;
		}

		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
			       ((tp->rx_ret_ring_mask + 1) <<
				BDINFO_FLAGS_MAXLEN_SHIFT), 0);

		stblk += 8;
		rxrcb += TG3_BDINFO_SIZE;
	}
}
8297
8298 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8299 {
8300         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8301
8302         if (!tg3_flag(tp, 5750_PLUS) ||
8303             tg3_flag(tp, 5780_CLASS) ||
8304             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8305             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8306             tg3_flag(tp, 57765_PLUS))
8307                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8308         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8309                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8310                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8311         else
8312                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8313
8314         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8315         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8316
8317         val = min(nic_rep_thresh, host_rep_thresh);
8318         tw32(RCVBDI_STD_THRESH, val);
8319
8320         if (tg3_flag(tp, 57765_PLUS))
8321                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8322
8323         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8324                 return;
8325
8326         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8327
8328         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8329
8330         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8331         tw32(RCVBDI_JUMBO_THRESH, val);
8332
8333         if (tg3_flag(tp, 57765_PLUS))
8334                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8335 }
8336
8337 static inline u32 calc_crc(unsigned char *buf, int len)
8338 {
8339         u32 reg;
8340         u32 tmp;
8341         int j, k;
8342
8343         reg = 0xffffffff;
8344
8345         for (j = 0; j < len; j++) {
8346                 reg ^= buf[j];
8347
8348                 for (k = 0; k < 8; k++) {
8349                         tmp = reg & 0x01;
8350
8351                         reg >>= 1;
8352
8353                         if (tmp)
8354                                 reg ^= 0xedb88320;
8355                 }
8356         }
8357
8358         return ~reg;
8359 }
8360
8361 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8362 {
8363         /* accept or reject all multicast frames */
8364         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8365         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8366         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8367         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8368 }
8369
/* Recompute and apply the RX filtering mode from dev->flags: promiscuous,
 * all-multicast, no-multicast, or a hash filter built from the device's
 * multicast list.  MAC_RX_MODE is only rewritten when the mode actually
 * changed.  Caller is expected to hold tp->lock (NOTE(review): inferred
 * from the sibling __-prefixed helpers here — confirm at call sites).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;

	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
				  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
	if (!tg3_flag(tp, ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= RX_MODE_PROMISC;
	} else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast. */
		tg3_set_multi(tp, 1);
	} else if (netdev_mc_empty(dev)) {
		/* Reject all multicast. */
		tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s): build a 128-bit hash
		 * filter spread across the four MAC_HASH registers.
		 */
		struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx;
		u32 bit;
		u32 crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			/* Low 7 bits of the inverted CRC pick one of the
			 * 128 filter bits: bits 6:5 select the register,
			 * bits 4:0 the bit within it.
			 */
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		tw32(MAC_HASH_REG_0, mc_filter[0]);
		tw32(MAC_HASH_REG_1, mc_filter[1]);
		tw32(MAC_HASH_REG_2, mc_filter[2]);
		tw32(MAC_HASH_REG_3, mc_filter[3]);
	}

	/* Only touch the hardware if the mode bits actually changed. */
	if (rx_mode != tp->rx_mode) {
		tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
		udelay(10);
	}
}
8423
8424 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8425 {
8426         int i;
8427
8428         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8429                 tp->rss_ind_tbl[i] =
8430                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8431 }
8432
8433 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8434 {
8435         int i;
8436
8437         if (!tg3_flag(tp, SUPPORT_MSIX))
8438                 return;
8439
8440         if (tp->irq_cnt <= 2) {
8441                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8442                 return;
8443         }
8444
8445         /* Validate table against current IRQ count */
8446         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8447                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8448                         break;
8449         }
8450
8451         if (i != TG3_RSS_INDIR_TBL_SIZE)
8452                 tg3_rss_init_dflt_indir_tbl(tp);
8453 }
8454
/* Write tp->rss_ind_tbl into the MAC_RSS_INDIR_TBL registers, packing
 * eight 4-bit table entries into each 32-bit register.  The first entry
 * of each group of eight lands in the most-significant nibble.
 *
 * NOTE(review): the inner loop reads entries until i is a multiple of 8,
 * so this presumes TG3_RSS_INDIR_TBL_SIZE is a multiple of 8 — confirm
 * against the constant's definition.
 */
static void tg3_rss_write_indir_tbl(struct tg3 *tp)
{
	int i = 0;
	u32 reg = MAC_RSS_INDIR_TBL_0;

	while (i < TG3_RSS_INDIR_TBL_SIZE) {
		u32 val = tp->rss_ind_tbl[i];
		i++;
		/* Shift in the next seven 4-bit entries of this group. */
		for (; i % 8; i++) {
			val <<= 4;
			val |= tp->rss_ind_tbl[i];
		}
		tw32(reg, val);
		reg += 4;
	}
}
8471
8472 /* tp->lock is held. */
8473 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8474 {
8475         u32 val, rdmac_mode;
8476         int i, err, limit;
8477         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8478
8479         tg3_disable_ints(tp);
8480
8481         tg3_stop_fw(tp);
8482
8483         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8484
8485         if (tg3_flag(tp, INIT_COMPLETE))
8486                 tg3_abort_hw(tp, 1);
8487
8488         /* Enable MAC control of LPI */
8489         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8490                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8491                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8492                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8493
8494                 tw32_f(TG3_CPMU_EEE_CTRL,
8495                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8496
8497                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8498                       TG3_CPMU_EEEMD_LPI_IN_TX |
8499                       TG3_CPMU_EEEMD_LPI_IN_RX |
8500                       TG3_CPMU_EEEMD_EEE_ENABLE;
8501
8502                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8503                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8504
8505                 if (tg3_flag(tp, ENABLE_APE))
8506                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8507
8508                 tw32_f(TG3_CPMU_EEE_MODE, val);
8509
8510                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8511                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8512                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8513
8514                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8515                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8516                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8517         }
8518
8519         if (reset_phy)
8520                 tg3_phy_reset(tp);
8521
8522         err = tg3_chip_reset(tp);
8523         if (err)
8524                 return err;
8525
8526         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8527
8528         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8529                 val = tr32(TG3_CPMU_CTRL);
8530                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8531                 tw32(TG3_CPMU_CTRL, val);
8532
8533                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8534                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8535                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8536                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8537
8538                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8539                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8540                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8541                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8542
8543                 val = tr32(TG3_CPMU_HST_ACC);
8544                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8545                 val |= CPMU_HST_ACC_MACCLK_6_25;
8546                 tw32(TG3_CPMU_HST_ACC, val);
8547         }
8548
8549         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8550                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8551                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8552                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8553                 tw32(PCIE_PWR_MGMT_THRESH, val);
8554
8555                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8556                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8557
8558                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8559
8560                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8561                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8562         }
8563
8564         if (tg3_flag(tp, L1PLLPD_EN)) {
8565                 u32 grc_mode = tr32(GRC_MODE);
8566
8567                 /* Access the lower 1K of PL PCIE block registers. */
8568                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8569                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8570
8571                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8572                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8573                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8574
8575                 tw32(GRC_MODE, grc_mode);
8576         }
8577
8578         if (tg3_flag(tp, 57765_CLASS)) {
8579                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8580                         u32 grc_mode = tr32(GRC_MODE);
8581
8582                         /* Access the lower 1K of PL PCIE block registers. */
8583                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8584                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8585
8586                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8587                                    TG3_PCIE_PL_LO_PHYCTL5);
8588                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8589                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8590
8591                         tw32(GRC_MODE, grc_mode);
8592                 }
8593
8594                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8595                         u32 grc_mode = tr32(GRC_MODE);
8596
8597                         /* Access the lower 1K of DL PCIE block registers. */
8598                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8599                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8600
8601                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8602                                    TG3_PCIE_DL_LO_FTSMAX);
8603                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8604                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8605                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8606
8607                         tw32(GRC_MODE, grc_mode);
8608                 }
8609
8610                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8611                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8612                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8613                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8614         }
8615
8616         /* This works around an issue with Athlon chipsets on
8617          * B3 tigon3 silicon.  This bit has no effect on any
8618          * other revision.  But do not set this on PCI Express
8619          * chips and don't even touch the clocks if the CPMU is present.
8620          */
8621         if (!tg3_flag(tp, CPMU_PRESENT)) {
8622                 if (!tg3_flag(tp, PCI_EXPRESS))
8623                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8624                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8625         }
8626
8627         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8628             tg3_flag(tp, PCIX_MODE)) {
8629                 val = tr32(TG3PCI_PCISTATE);
8630                 val |= PCISTATE_RETRY_SAME_DMA;
8631                 tw32(TG3PCI_PCISTATE, val);
8632         }
8633
8634         if (tg3_flag(tp, ENABLE_APE)) {
8635                 /* Allow reads and writes to the
8636                  * APE register and memory space.
8637                  */
8638                 val = tr32(TG3PCI_PCISTATE);
8639                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8640                        PCISTATE_ALLOW_APE_SHMEM_WR |
8641                        PCISTATE_ALLOW_APE_PSPACE_WR;
8642                 tw32(TG3PCI_PCISTATE, val);
8643         }
8644
8645         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8646                 /* Enable some hw fixes.  */
8647                 val = tr32(TG3PCI_MSI_DATA);
8648                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8649                 tw32(TG3PCI_MSI_DATA, val);
8650         }
8651
8652         /* Descriptor ring init may make accesses to the
8653          * NIC SRAM area to setup the TX descriptors, so we
8654          * can only do this after the hardware has been
8655          * successfully reset.
8656          */
8657         err = tg3_init_rings(tp);
8658         if (err)
8659                 return err;
8660
8661         if (tg3_flag(tp, 57765_PLUS)) {
8662                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8663                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8664                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8665                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8666                 if (!tg3_flag(tp, 57765_CLASS) &&
8667                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8668                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8669                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8670         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8671                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8672                 /* This value is determined during the probe time DMA
8673                  * engine test, tg3_test_dma.
8674                  */
8675                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8676         }
8677
8678         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8679                           GRC_MODE_4X_NIC_SEND_RINGS |
8680                           GRC_MODE_NO_TX_PHDR_CSUM |
8681                           GRC_MODE_NO_RX_PHDR_CSUM);
8682         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8683
8684         /* Pseudo-header checksum is done by hardware logic and not
8685          * the offload processers, so make the chip do the pseudo-
8686          * header checksums on receive.  For transmit it is more
8687          * convenient to do the pseudo-header checksum in software
8688          * as Linux does that on transmit for us in all cases.
8689          */
8690         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8691
8692         tw32(GRC_MODE,
8693              tp->grc_mode |
8694              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8695
8696         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
8697         val = tr32(GRC_MISC_CFG);
8698         val &= ~0xff;
8699         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8700         tw32(GRC_MISC_CFG, val);
8701
8702         /* Initialize MBUF/DESC pool. */
8703         if (tg3_flag(tp, 5750_PLUS)) {
8704                 /* Do nothing.  */
8705         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8706                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8707                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8708                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8709                 else
8710                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8711                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8712                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8713         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8714                 int fw_len;
8715
8716                 fw_len = tp->fw_len;
8717                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8718                 tw32(BUFMGR_MB_POOL_ADDR,
8719                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8720                 tw32(BUFMGR_MB_POOL_SIZE,
8721                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8722         }
8723
8724         if (tp->dev->mtu <= ETH_DATA_LEN) {
8725                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8726                      tp->bufmgr_config.mbuf_read_dma_low_water);
8727                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8728                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8729                 tw32(BUFMGR_MB_HIGH_WATER,
8730                      tp->bufmgr_config.mbuf_high_water);
8731         } else {
8732                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8733                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8734                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8735                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8736                 tw32(BUFMGR_MB_HIGH_WATER,
8737                      tp->bufmgr_config.mbuf_high_water_jumbo);
8738         }
8739         tw32(BUFMGR_DMA_LOW_WATER,
8740              tp->bufmgr_config.dma_low_water);
8741         tw32(BUFMGR_DMA_HIGH_WATER,
8742              tp->bufmgr_config.dma_high_water);
8743
8744         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8745         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8746                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8747         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8748             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8749             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8750                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8751         tw32(BUFMGR_MODE, val);
8752         for (i = 0; i < 2000; i++) {
8753                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8754                         break;
8755                 udelay(10);
8756         }
8757         if (i >= 2000) {
8758                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8759                 return -ENODEV;
8760         }
8761
8762         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8763                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8764
8765         tg3_setup_rxbd_thresholds(tp);
8766
8767         /* Initialize TG3_BDINFO's at:
8768          *  RCVDBDI_STD_BD:     standard eth size rx ring
8769          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8770          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8771          *
8772          * like so:
8773          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8774          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8775          *                              ring attribute flags
8776          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8777          *
8778          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8779          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8780          *
8781          * The size of each ring is fixed in the firmware, but the location is
8782          * configurable.
8783          */
8784         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8785              ((u64) tpr->rx_std_mapping >> 32));
8786         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8787              ((u64) tpr->rx_std_mapping & 0xffffffff));
8788         if (!tg3_flag(tp, 5717_PLUS))
8789                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8790                      NIC_SRAM_RX_BUFFER_DESC);
8791
8792         /* Disable the mini ring */
8793         if (!tg3_flag(tp, 5705_PLUS))
8794                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8795                      BDINFO_FLAGS_DISABLED);
8796
8797         /* Program the jumbo buffer descriptor ring control
8798          * blocks on those devices that have them.
8799          */
8800         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8801             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8802
8803                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8804                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8805                              ((u64) tpr->rx_jmb_mapping >> 32));
8806                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8807                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8808                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8809                               BDINFO_FLAGS_MAXLEN_SHIFT;
8810                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8811                              val | BDINFO_FLAGS_USE_EXT_RECV);
8812                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8813                             tg3_flag(tp, 57765_CLASS))
8814                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8815                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8816                 } else {
8817                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8818                              BDINFO_FLAGS_DISABLED);
8819                 }
8820
8821                 if (tg3_flag(tp, 57765_PLUS)) {
8822                         val = TG3_RX_STD_RING_SIZE(tp);
8823                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8824                         val |= (TG3_RX_STD_DMA_SZ << 2);
8825                 } else
8826                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8827         } else
8828                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8829
8830         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8831
8832         tpr->rx_std_prod_idx = tp->rx_pending;
8833         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8834
8835         tpr->rx_jmb_prod_idx =
8836                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8837         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8838
8839         tg3_rings_reset(tp);
8840
8841         /* Initialize MAC address and backoff seed. */
8842         __tg3_set_mac_addr(tp, 0);
8843
8844         /* MTU + ethernet header + FCS + optional VLAN tag */
8845         tw32(MAC_RX_MTU_SIZE,
8846              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8847
8848         /* The slot time is changed by tg3_setup_phy if we
8849          * run at gigabit with half duplex.
8850          */
8851         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8852               (6 << TX_LENGTHS_IPG_SHIFT) |
8853               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8854
8855         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8856                 val |= tr32(MAC_TX_LENGTHS) &
8857                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8858                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8859
8860         tw32(MAC_TX_LENGTHS, val);
8861
8862         /* Receive rules. */
8863         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8864         tw32(RCVLPC_CONFIG, 0x0181);
8865
8866         /* Calculate RDMAC_MODE setting early, we need it to determine
8867          * the RCVLPC_STATE_ENABLE mask.
8868          */
8869         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8870                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8871                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8872                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8873                       RDMAC_MODE_LNGREAD_ENAB);
8874
8875         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8876                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8877
8878         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8879             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8880             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8881                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8882                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8883                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8884
8885         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8886             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8887                 if (tg3_flag(tp, TSO_CAPABLE) &&
8888                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8889                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8890                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8891                            !tg3_flag(tp, IS_5788)) {
8892                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8893                 }
8894         }
8895
8896         if (tg3_flag(tp, PCI_EXPRESS))
8897                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8898
8899         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
8900                 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
8901
8902         if (tg3_flag(tp, HW_TSO_1) ||
8903             tg3_flag(tp, HW_TSO_2) ||
8904             tg3_flag(tp, HW_TSO_3))
8905                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8906
8907         if (tg3_flag(tp, 57765_PLUS) ||
8908             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8909             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8910                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8911
8912         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8913                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8914
8915         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8916             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8917             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8918             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8919             tg3_flag(tp, 57765_PLUS)) {
8920                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8921                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8922                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8923                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8924                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8925                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8926                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8927                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8928                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8929                 }
8930                 tw32(TG3_RDMA_RSRVCTRL_REG,
8931                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8932         }
8933
8934         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8935             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8936                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8937                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8938                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8939                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8940         }
8941
8942         /* Receive/send statistics. */
8943         if (tg3_flag(tp, 5750_PLUS)) {
8944                 val = tr32(RCVLPC_STATS_ENABLE);
8945                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8946                 tw32(RCVLPC_STATS_ENABLE, val);
8947         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8948                    tg3_flag(tp, TSO_CAPABLE)) {
8949                 val = tr32(RCVLPC_STATS_ENABLE);
8950                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8951                 tw32(RCVLPC_STATS_ENABLE, val);
8952         } else {
8953                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8954         }
8955         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8956         tw32(SNDDATAI_STATSENAB, 0xffffff);
8957         tw32(SNDDATAI_STATSCTRL,
8958              (SNDDATAI_SCTRL_ENABLE |
8959               SNDDATAI_SCTRL_FASTUPD));
8960
8961         /* Setup host coalescing engine. */
8962         tw32(HOSTCC_MODE, 0);
8963         for (i = 0; i < 2000; i++) {
8964                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8965                         break;
8966                 udelay(10);
8967         }
8968
8969         __tg3_set_coalesce(tp, &tp->coal);
8970
8971         if (!tg3_flag(tp, 5705_PLUS)) {
8972                 /* Status/statistics block address.  See tg3_timer,
8973                  * the tg3_periodic_fetch_stats call there, and
8974                  * tg3_get_stats to see how this works for 5705/5750 chips.
8975                  */
8976                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8977                      ((u64) tp->stats_mapping >> 32));
8978                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8979                      ((u64) tp->stats_mapping & 0xffffffff));
8980                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8981
8982                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8983
8984                 /* Clear statistics and status block memory areas */
8985                 for (i = NIC_SRAM_STATS_BLK;
8986                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8987                      i += sizeof(u32)) {
8988                         tg3_write_mem(tp, i, 0);
8989                         udelay(40);
8990                 }
8991         }
8992
8993         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8994
8995         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8996         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8997         if (!tg3_flag(tp, 5705_PLUS))
8998                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8999
9000         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9001                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9002                 /* reset to prevent losing 1st rx packet intermittently */
9003                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9004                 udelay(10);
9005         }
9006
9007         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9008                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9009                         MAC_MODE_FHDE_ENABLE;
9010         if (tg3_flag(tp, ENABLE_APE))
9011                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9012         if (!tg3_flag(tp, 5705_PLUS) &&
9013             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9014             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9015                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9016         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9017         udelay(40);
9018
9019         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9020          * If TG3_FLAG_IS_NIC is zero, we should read the
9021          * register to preserve the GPIO settings for LOMs. The GPIOs,
9022          * whether used as inputs or outputs, are set by boot code after
9023          * reset.
9024          */
9025         if (!tg3_flag(tp, IS_NIC)) {
9026                 u32 gpio_mask;
9027
9028                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9029                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9030                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9031
9032                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9033                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9034                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9035
9036                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9037                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9038
9039                 tp->grc_local_ctrl &= ~gpio_mask;
9040                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9041
9042                 /* GPIO1 must be driven high for eeprom write protect */
9043                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9044                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9045                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9046         }
9047         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9048         udelay(100);
9049
9050         if (tg3_flag(tp, USING_MSIX)) {
9051                 val = tr32(MSGINT_MODE);
9052                 val |= MSGINT_MODE_ENABLE;
9053                 if (tp->irq_cnt > 1)
9054                         val |= MSGINT_MODE_MULTIVEC_EN;
9055                 if (!tg3_flag(tp, 1SHOT_MSI))
9056                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9057                 tw32(MSGINT_MODE, val);
9058         }
9059
9060         if (!tg3_flag(tp, 5705_PLUS)) {
9061                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9062                 udelay(40);
9063         }
9064
9065         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9066                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9067                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9068                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9069                WDMAC_MODE_LNGREAD_ENAB);
9070
9071         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9072             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9073                 if (tg3_flag(tp, TSO_CAPABLE) &&
9074                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9075                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9076                         /* nothing */
9077                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9078                            !tg3_flag(tp, IS_5788)) {
9079                         val |= WDMAC_MODE_RX_ACCEL;
9080                 }
9081         }
9082
9083         /* Enable host coalescing bug fix */
9084         if (tg3_flag(tp, 5755_PLUS))
9085                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9086
9087         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9088                 val |= WDMAC_MODE_BURST_ALL_DATA;
9089
9090         tw32_f(WDMAC_MODE, val);
9091         udelay(40);
9092
9093         if (tg3_flag(tp, PCIX_MODE)) {
9094                 u16 pcix_cmd;
9095
9096                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9097                                      &pcix_cmd);
9098                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9099                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9100                         pcix_cmd |= PCI_X_CMD_READ_2K;
9101                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9102                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9103                         pcix_cmd |= PCI_X_CMD_READ_2K;
9104                 }
9105                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9106                                       pcix_cmd);
9107         }
9108
9109         tw32_f(RDMAC_MODE, rdmac_mode);
9110         udelay(40);
9111
9112         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9113         if (!tg3_flag(tp, 5705_PLUS))
9114                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9115
9116         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9117                 tw32(SNDDATAC_MODE,
9118                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9119         else
9120                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9121
9122         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9123         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9124         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9125         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9126                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9127         tw32(RCVDBDI_MODE, val);
9128         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9129         if (tg3_flag(tp, HW_TSO_1) ||
9130             tg3_flag(tp, HW_TSO_2) ||
9131             tg3_flag(tp, HW_TSO_3))
9132                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9133         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9134         if (tg3_flag(tp, ENABLE_TSS))
9135                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9136         tw32(SNDBDI_MODE, val);
9137         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9138
9139         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9140                 err = tg3_load_5701_a0_firmware_fix(tp);
9141                 if (err)
9142                         return err;
9143         }
9144
9145         if (tg3_flag(tp, TSO_CAPABLE)) {
9146                 err = tg3_load_tso_firmware(tp);
9147                 if (err)
9148                         return err;
9149         }
9150
9151         tp->tx_mode = TX_MODE_ENABLE;
9152
9153         if (tg3_flag(tp, 5755_PLUS) ||
9154             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9155                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9156
9157         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9158                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9159                 tp->tx_mode &= ~val;
9160                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9161         }
9162
9163         tw32_f(MAC_TX_MODE, tp->tx_mode);
9164         udelay(100);
9165
9166         if (tg3_flag(tp, ENABLE_RSS)) {
9167                 tg3_rss_write_indir_tbl(tp);
9168
9169                 /* Setup the "secret" hash key. */
9170                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9171                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9172                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9173                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9174                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9175                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9176                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9177                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9178                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9179                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9180         }
9181
9182         tp->rx_mode = RX_MODE_ENABLE;
9183         if (tg3_flag(tp, 5755_PLUS))
9184                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9185
9186         if (tg3_flag(tp, ENABLE_RSS))
9187                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9188                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9189                                RX_MODE_RSS_IPV6_HASH_EN |
9190                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9191                                RX_MODE_RSS_IPV4_HASH_EN |
9192                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9193
9194         tw32_f(MAC_RX_MODE, tp->rx_mode);
9195         udelay(10);
9196
9197         tw32(MAC_LED_CTRL, tp->led_ctrl);
9198
9199         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9200         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9201                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9202                 udelay(10);
9203         }
9204         tw32_f(MAC_RX_MODE, tp->rx_mode);
9205         udelay(10);
9206
9207         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9208                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9209                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9210                         /* Set drive transmission level to 1.2V  */
9211                         /* only if the signal pre-emphasis bit is not set  */
9212                         val = tr32(MAC_SERDES_CFG);
9213                         val &= 0xfffff000;
9214                         val |= 0x880;
9215                         tw32(MAC_SERDES_CFG, val);
9216                 }
9217                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9218                         tw32(MAC_SERDES_CFG, 0x616000);
9219         }
9220
9221         /* Prevent chip from dropping frames when flow control
9222          * is enabled.
9223          */
9224         if (tg3_flag(tp, 57765_CLASS))
9225                 val = 1;
9226         else
9227                 val = 2;
9228         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9229
9230         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9231             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9232                 /* Use hardware link auto-negotiation */
9233                 tg3_flag_set(tp, HW_AUTONEG);
9234         }
9235
9236         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9237             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9238                 u32 tmp;
9239
9240                 tmp = tr32(SERDES_RX_CTRL);
9241                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9242                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9243                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9244                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9245         }
9246
9247         if (!tg3_flag(tp, USE_PHYLIB)) {
9248                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9249                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9250                         tp->link_config.speed = tp->link_config.orig_speed;
9251                         tp->link_config.duplex = tp->link_config.orig_duplex;
9252                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9253                 }
9254
9255                 err = tg3_setup_phy(tp, 0);
9256                 if (err)
9257                         return err;
9258
9259                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9260                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9261                         u32 tmp;
9262
9263                         /* Clear CRC stats. */
9264                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9265                                 tg3_writephy(tp, MII_TG3_TEST1,
9266                                              tmp | MII_TG3_TEST1_CRC_EN);
9267                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9268                         }
9269                 }
9270         }
9271
9272         __tg3_set_rx_mode(tp->dev);
9273
9274         /* Initialize receive rules. */
9275         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9276         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9277         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9278         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9279
9280         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9281                 limit = 8;
9282         else
9283                 limit = 16;
9284         if (tg3_flag(tp, ENABLE_ASF))
9285                 limit -= 4;
9286         switch (limit) {
9287         case 16:
9288                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9289         case 15:
9290                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9291         case 14:
9292                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9293         case 13:
9294                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9295         case 12:
9296                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9297         case 11:
9298                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9299         case 10:
9300                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9301         case 9:
9302                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9303         case 8:
9304                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9305         case 7:
9306                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9307         case 6:
9308                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9309         case 5:
9310                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9311         case 4:
9312                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9313         case 3:
9314                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9315         case 2:
9316         case 1:
9317
9318         default:
9319                 break;
9320         }
9321
9322         if (tg3_flag(tp, ENABLE_APE))
9323                 /* Write our heartbeat update interval to APE. */
9324                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9325                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9326
9327         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9328
9329         return 0;
9330 }
9331
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 *
 * Returns 0 on success or a negative errno from tg3_reset_hw().
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
	/* Make sure the core clock configuration is sane before touching
	 * any other chip state.
	 */
	tg3_switch_clocks(tp);

	/* Reset the indirect memory window base so subsequent SRAM
	 * accesses start from a known offset.
	 */
	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

	return tg3_reset_hw(tp, reset_phy);
}
9343
/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 *
 * On failure the device cannot be left half-initialized: the chip is
 * halted and the netdev is closed.  dev_close() cannot be called with
 * tp->lock held, so the lock is dropped around it and reacquired before
 * returning (hence the __releases/__acquires annotations).
 *
 * Returns 0 on success or the negative errno from tg3_init_hw().
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
	__releases(tp->lock)
	__acquires(tp->lock)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		netdev_err(tp->dev,
			   "Failed to re-initialize device, aborting\n");
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		/* Drop the lock for the teardown path; dev_close() and
		 * del_timer_sync() must run without tp->lock held.
		 */
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		tg3_napi_enable(tp);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}
9367
/* Workqueue handler for tp->reset_task: perform a full chip reset and
 * re-initialization while the netif queues, NAPI and PHY are quiesced.
 * Scheduled via tg3_reset_task_schedule() (e.g. from the watchdog when
 * the write DMA engine is found disabled, or on TX timeout recovery).
 */
static void tg3_reset_task(struct work_struct *work)
{
	struct tg3 *tp = container_of(work, struct tg3, reset_task);
	int err;

	tg3_full_lock(tp, 0);

	if (!netif_running(tp->dev)) {
		/* The device was closed while this work was pending;
		 * nothing to reset.
		 */
		tg3_flag_clear(tp, RESET_TASK_PENDING);
		tg3_full_unlock(tp);
		return;
	}

	tg3_full_unlock(tp);

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	tg3_full_lock(tp, 1);

	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
		/* TX recovery: switch to the alternate mailbox write
		 * routines and record that mailbox write reordering
		 * handling is now in effect.
		 */
		tp->write32_tx_mbox = tg3_write32_tx_mbox;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
		tg3_flag_set(tp, MBOX_WRITE_REORDER);
		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	err = tg3_init_hw(tp, 1);
	if (err)
		goto out;

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* Restart the PHY only if re-init succeeded. */
	if (!err)
		tg3_phy_start(tp);

	tg3_flag_clear(tp, RESET_TASK_PENDING);
}
9411
/* Read the 32-bit statistics register REG and accumulate its value
 * into the 64-bit software counter PSTAT (a high/low u32 pair).
 * If the low word wraps during the addition (detected by the sum
 * being smaller than the addend), carry one into the high word.
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
9418
/* Harvest the MAC hardware statistics counters into tp->hw_stats.
 * Called once per second from tg3_timer() with tp->lock held.
 * Skipped while the link is down, as the counters are only meaningful
 * with an active carrier.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
	struct tg3_hw_stats *sp = tp->hw_stats;

	if (!netif_carrier_ok(tp->dev))
		return;

	/* Transmit-side counters. */
	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

	/* Receive-side counters. */
	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
	} else {
		/* On 5717 / 5719-A0 / 5720-A0 rx_discards is derived from
		 * the HOSTCC mbuf low-watermark attention bit instead of
		 * the RCVLPC counter; the bit is acknowledged by writing
		 * it back, and each hit counts as one discard.
		 */
		u32 val = tr32(HOSTCC_FLOW_ATTN);
		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
		if (val) {
			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
			sp->rx_discards.low += val;
			if (sp->rx_discards.low < val)
				sp->rx_discards.high += 1;
		}
		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
	}
	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
9473
9474 static void tg3_chk_missed_msi(struct tg3 *tp)
9475 {
9476         u32 i;
9477
9478         for (i = 0; i < tp->irq_cnt; i++) {
9479                 struct tg3_napi *tnapi = &tp->napi[i];
9480
9481                 if (tg3_has_work(tnapi)) {
9482                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9483                             tnapi->last_tx_cons == tnapi->tx_cons) {
9484                                 if (tnapi->chk_msi_cnt < 1) {
9485                                         tnapi->chk_msi_cnt++;
9486                                         return;
9487                                 }
9488                                 tg3_msi(0, tnapi);
9489                         }
9490                 }
9491                 tnapi->chk_msi_cnt = 0;
9492                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9493                 tnapi->last_tx_cons = tnapi->tx_cons;
9494         }
9495 }
9496
/* Driver watchdog, re-armed every tp->timer_offset jiffies while the
 * interface is up.  Under tp->lock it: kicks stuck interrupts on
 * chips needing the missed-MSI workaround, works around the racy
 * non-tagged status-block protocol, harvests statistics and polls
 * link state once per second, and sends the ASF heartbeat every two
 * seconds.
 */
static void tg3_timer(unsigned long __opaque)
{
	struct tg3 *tp = (struct tg3 *) __opaque;

	/* While IRQs are being synchronized or a reset is queued, just
	 * keep the timer alive and do nothing else.
	 */
	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
		goto restart_timer;

	spin_lock(&tp->lock);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    tg3_flag(tp, 57765_CLASS))
		tg3_chk_missed_msi(tp);

	if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because when using non-tagged
		 * IRQ status the mailbox/status_block protocol the chip
		 * uses with the cpu is race prone.
		 */
		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
			/* Status was updated but may not have been
			 * serviced: force the interrupt to fire again.
			 */
			tw32(GRC_LOCAL_CTRL,
			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
		} else {
			tw32(HOSTCC_MODE, tp->coalesce_mode |
			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
		}

		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
			/* Write DMA engine is no longer enabled;
			 * schedule a full chip reset to recover.
			 */
			spin_unlock(&tp->lock);
			tg3_reset_task_schedule(tp);
			goto restart_timer;
		}
	}

	/* This part only runs once per second. */
	if (!--tp->timer_counter) {
		if (tg3_flag(tp, 5705_PLUS))
			tg3_periodic_fetch_stats(tp);

		/* Deferred EEE enable, counted down in seconds. */
		if (tp->setlpicnt && !--tp->setlpicnt)
			tg3_phy_eee_enable(tp);

		if (tg3_flag(tp, USE_LINKCHG_REG)) {
			/* Poll link state via the MAC status register. */
			u32 mac_stat;
			int phy_event;

			mac_stat = tr32(MAC_STATUS);

			phy_event = 0;
			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
					phy_event = 1;
			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
				phy_event = 1;

			if (phy_event)
				tg3_setup_phy(tp, 0);
		} else if (tg3_flag(tp, POLL_SERDES)) {
			u32 mac_stat = tr32(MAC_STATUS);
			int need_setup = 0;

			/* Link was up and state changed, or link was
			 * down and the SERDES now sees sync/signal:
			 * either way, renegotiate.
			 */
			if (netif_carrier_ok(tp->dev) &&
			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
				need_setup = 1;
			}
			if (!netif_carrier_ok(tp->dev) &&
			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
					 MAC_STATUS_SIGNAL_DET))) {
				need_setup = 1;
			}
			if (need_setup) {
				if (!tp->serdes_counter) {
					/* Bounce the port-mode bits to
					 * force a fresh link bring-up.
					 */
					tw32_f(MAC_MODE,
					     (tp->mac_mode &
					      ~MAC_MODE_PORT_MODE_MASK));
					udelay(40);
					tw32_f(MAC_MODE, tp->mac_mode);
					udelay(40);
				}
				tg3_setup_phy(tp, 0);
			}
		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
			   tg3_flag(tp, 5780_CLASS)) {
			tg3_serdes_parallel_detect(tp);
		}

		tp->timer_counter = tp->timer_multiplier;
	}

	/* Heartbeat is only sent once every 2 seconds.
	 *
	 * The heartbeat is to tell the ASF firmware that the host
	 * driver is still alive.  In the event that the OS crashes,
	 * ASF needs to reset the hardware to free up the FIFO space
	 * that may be filled with rx packets destined for the host.
	 * If the FIFO is full, ASF will no longer function properly.
	 *
	 * Unintended resets have been reported on real time kernels
	 * where the timer doesn't run on time.  Netpoll will also have
	 * same problem.
	 *
	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
	 * to check the ring condition when the heartbeat is expiring
	 * before doing the reset.  This will prevent most unintended
	 * resets.
	 */
	if (!--tp->asf_counter) {
		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
			tg3_wait_for_event_ack(tp);

			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
				      FWCMD_NICDRV_ALIVE3);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
				      TG3_FW_UPDATE_TIMEOUT_SEC);

			tg3_generate_fw_event(tp);
		}
		tp->asf_counter = tp->asf_multiplier;
	}

	spin_unlock(&tp->lock);

restart_timer:
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);
}
9623
9624 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9625 {
9626         irq_handler_t fn;
9627         unsigned long flags;
9628         char *name;
9629         struct tg3_napi *tnapi = &tp->napi[irq_num];
9630
9631         if (tp->irq_cnt == 1)
9632                 name = tp->dev->name;
9633         else {
9634                 name = &tnapi->irq_lbl[0];
9635                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9636                 name[IFNAMSIZ-1] = 0;
9637         }
9638
9639         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9640                 fn = tg3_msi;
9641                 if (tg3_flag(tp, 1SHOT_MSI))
9642                         fn = tg3_msi_1shot;
9643                 flags = 0;
9644         } else {
9645                 fn = tg3_interrupt;
9646                 if (tg3_flag(tp, TAGGED_STATUS))
9647                         fn = tg3_interrupt_tagged;
9648                 flags = IRQF_SHARED;
9649         }
9650
9651         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9652 }
9653
9654 static int tg3_test_interrupt(struct tg3 *tp)
9655 {
9656         struct tg3_napi *tnapi = &tp->napi[0];
9657         struct net_device *dev = tp->dev;
9658         int err, i, intr_ok = 0;
9659         u32 val;
9660
9661         if (!netif_running(dev))
9662                 return -ENODEV;
9663
9664         tg3_disable_ints(tp);
9665
9666         free_irq(tnapi->irq_vec, tnapi);
9667
9668         /*
9669          * Turn off MSI one shot mode.  Otherwise this test has no
9670          * observable way to know whether the interrupt was delivered.
9671          */
9672         if (tg3_flag(tp, 57765_PLUS)) {
9673                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9674                 tw32(MSGINT_MODE, val);
9675         }
9676
9677         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9678                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9679         if (err)
9680                 return err;
9681
9682         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9683         tg3_enable_ints(tp);
9684
9685         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9686                tnapi->coal_now);
9687
9688         for (i = 0; i < 5; i++) {
9689                 u32 int_mbox, misc_host_ctrl;
9690
9691                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9692                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9693
9694                 if ((int_mbox != 0) ||
9695                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9696                         intr_ok = 1;
9697                         break;
9698                 }
9699
9700                 if (tg3_flag(tp, 57765_PLUS) &&
9701                     tnapi->hw_status->status_tag != tnapi->last_tag)
9702                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9703
9704                 msleep(10);
9705         }
9706
9707         tg3_disable_ints(tp);
9708
9709         free_irq(tnapi->irq_vec, tnapi);
9710
9711         err = tg3_request_irq(tp, 0);
9712
9713         if (err)
9714                 return err;
9715
9716         if (intr_ok) {
9717                 /* Reenable MSI one shot mode. */
9718                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9719                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9720                         tw32(MSGINT_MODE, val);
9721                 }
9722                 return 0;
9723         }
9724
9725         return -EIO;
9726 }
9727
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored.
 *
 * SERR reporting is suppressed around the test because a failing MSI
 * cycle may terminate with Master Abort.  If the test reports -EIO
 * (no interrupt seen), the driver falls back to INTx: the MSI vector
 * is released, the legacy pdev->irq is re-requested, and the chip is
 * reset since the aborted MSI cycle may have left it in a bad state.
 * Other errors are returned unchanged.
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;

	if (!tg3_flag(tp, USING_MSI))
		return 0;

	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
	pci_write_config_word(tp->pdev, PCI_COMMAND,
			      pci_cmd & ~PCI_COMMAND_SERR);

	err = tg3_test_interrupt(tp);

	/* Restore the original PCI command word. */
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;

	/* MSI test failed, go back to INTx mode */
	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
		    "to INTx mode. Please report this failure to the PCI "
		    "maintainer and include system chipset information\n");

	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	pci_disable_msi(tp->pdev);

	tg3_flag_clear(tp, USING_MSI);
	tp->napi[0].irq_vec = tp->pdev->irq;

	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	/* Need to reset the chip because the MSI cycle may have terminated
	 * with Master Abort.
	 */
	tg3_full_lock(tp, 1);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	err = tg3_init_hw(tp, 1);

	tg3_full_unlock(tp);

	/* If re-init failed, release the IRQ we just requested. */
	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
9788
9789 static int tg3_request_firmware(struct tg3 *tp)
9790 {
9791         const __be32 *fw_data;
9792
9793         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9794                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9795                            tp->fw_needed);
9796                 return -ENOENT;
9797         }
9798
9799         fw_data = (void *)tp->fw->data;
9800
9801         /* Firmware blob starts with version numbers, followed by
9802          * start address and _full_ length including BSS sections
9803          * (which must be longer than the actual data, of course
9804          */
9805
9806         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9807         if (tp->fw_len < (tp->fw->size - 12)) {
9808                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9809                            tp->fw_len, tp->fw_needed);
9810                 release_firmware(tp->fw);
9811                 tp->fw = NULL;
9812                 return -EINVAL;
9813         }
9814
9815         /* We no longer need firmware; we have it. */
9816         tp->fw_needed = NULL;
9817         return 0;
9818 }
9819
/* Try to put the device into MSI-X mode with roughly one vector per
 * online CPU (plus one, since vector 0 only handles link/misc events).
 * On success updates tp->irq_cnt, the per-vector irq mappings, the
 * stack's real RX/TX queue counts and the RSS/TSS flags, and returns
 * true.  Returns false (with MSI-X disabled) on any failure.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
	int i, rc;
	struct msix_entry msix_ent[tp->irq_max];

	tp->irq_cnt = num_online_cpus();
	if (tp->irq_cnt > 1) {
		/* We want as many rx rings enabled as there are cpus.
		 * In multiqueue MSI-X mode, the first MSI-X vector
		 * only deals with link interrupts, etc, so we add
		 * one to the number of vectors we are requesting.
		 */
		tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
	}

	/* Initialize all irq_max entries; unused ones keep vector 0. */
	for (i = 0; i < tp->irq_max; i++) {
		msix_ent[i].entry  = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		/* Fewer vectors available than requested: retry with
		 * the count the PCI core reported.
		 */
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}

	/* Copy every slot (zeros beyond irq_cnt are harmless). */
	for (i = 0; i < tp->irq_max; i++)
		tp->napi[i].irq_vec = msix_ent[i].vector;

	netif_set_real_num_tx_queues(tp->dev, 1);
	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}

	if (tp->irq_cnt > 1) {
		tg3_flag_set(tp, ENABLE_RSS);

		/* 5719/5720 also support multiple TX queues (TSS). */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
			tg3_flag_set(tp, ENABLE_TSS);
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
9873
/* Select and enable the interrupt mode for the device, preferring
 * MSI-X, then MSI, then legacy INTx.  Programs MSGINT_MODE for the
 * message-signaled modes and falls back to a single-vector INTx
 * configuration (pdev->irq, one RX/TX queue) otherwise.
 */
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
	    !tg3_flag(tp, TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}

	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
		tg3_flag_set(tp, USING_MSIX);
	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
		tg3_flag_set(tp, USING_MSI);

	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		/* Program the chip's MSI mode register to match. */
		u32 msi_mode = tr32(MSGINT_MODE);
		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		if (!tg3_flag(tp, 1SHOT_MSI))
			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!tg3_flag(tp, USING_MSIX)) {
		/* Single-vector fallback (INTx or plain MSI). */
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
9907
9908 static void tg3_ints_fini(struct tg3 *tp)
9909 {
9910         if (tg3_flag(tp, USING_MSIX))
9911                 pci_disable_msix(tp->pdev);
9912         else if (tg3_flag(tp, USING_MSI))
9913                 pci_disable_msi(tp->pdev);
9914         tg3_flag_clear(tp, USING_MSI);
9915         tg3_flag_clear(tp, USING_MSIX);
9916         tg3_flag_clear(tp, ENABLE_RSS);
9917         tg3_flag_clear(tp, ENABLE_TSS);
9918 }
9919
/* net_device_ops .ndo_open: bring the interface up.
 *
 * Loads firmware if still needed, powers the chip up, configures
 * interrupt vectors (which determines how many NAPI contexts and DMA
 * rings to allocate), programs the hardware, validates MSI delivery,
 * and finally starts the watchdog timer and TX queues.  Error paths
 * unwind in strict reverse order via the err_out* label ladder.
 */
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;

	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			/* On 5701 A0 the firmware is mandatory: fail
			 * the open if it could not be loaded.
			 */
			if (err)
				return err;
		} else if (err) {
			/* Elsewhere only TSO depends on the firmware. */
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tg3_flag_clear(tp, TSO_CAPABLE);
		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tg3_flag_set(tp, TSO_CAPABLE);
		}
	}

	netif_carrier_off(tp->dev);

	err = tg3_power_up(tp);
	if (err)
		return err;

	tg3_full_lock(tp, 0);

	tg3_disable_ints(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);

	tg3_rss_check_indir_tbl(tp);

	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;

	tg3_napi_init(tp);

	tg3_napi_enable(tp);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		err = tg3_request_irq(tp, i);
		if (err) {
			/* Release the vectors requested so far. */
			for (i--; i >= 0; i--) {
				tnapi = &tp->napi[i];
				free_irq(tnapi->irq_vec, tnapi);
			}
			goto err_out2;
		}
	}

	tg3_full_lock(tp, 0);

	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		/* Tagged-status chips (except 5717/57765 class) only
		 * need the watchdog once a second; others run it at
		 * 10 Hz.
		 */
		if (tg3_flag(tp, TAGGED_STATUS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
		    !tg3_flag(tp, 57765_CLASS))
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;

		BUG_ON(tp->timer_offset > HZ);
		tp->timer_counter = tp->timer_multiplier =
			(HZ / tp->timer_offset);
		/* ASF heartbeat fires at half the watchdog rate. */
		tp->asf_counter = tp->asf_multiplier =
			((HZ / tp->timer_offset) * 2);

		init_timer(&tp->timer);
		tp->timer.expires = jiffies + tp->timer_offset;
		tp->timer.data = (unsigned long) tp;
		tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;

	if (tg3_flag(tp, USING_MSI)) {
		/* Verify MSI delivery actually works on this platform;
		 * tg3_test_msi() falls back to INTx when it can.
		 */
		err = tg3_test_msi(tp);

		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}

		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
			u32 val = tr32(PCIE_TRANSACTION_CFG);

			tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}

	tg3_phy_start(tp);

	tg3_full_lock(tp, 0);

	add_timer(&tp->timer);
	tg3_flag_set(tp, INIT_COMPLETE);
	tg3_enable_ints(tp);

	tg3_full_unlock(tp);

	netif_tx_start_all_queues(dev);

	/*
	 * Reset loopback feature if it was turned on while the device was down
	 * make sure that it's installed properly now.
	 */
	if (dev->features & NETIF_F_LOOPBACK)
		tg3_set_loopback(dev, dev->features);

	return 0;

	/* Error unwinding, in reverse order of the setup above. */
err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_napi_fini(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	tg3_frob_aux_power(tp, false);
	pci_set_power_state(tp->pdev, PCI_D3hot);
	return err;
}
10072
/* net_device_ops .ndo_close: bring the interface down, undoing
 * everything tg3_open() set up — NAPI, pending reset work, the
 * watchdog timer, the PHY, the chip itself, the IRQ vectors, the
 * interrupt mode, the DMA rings, and finally device power.
 */
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);

	tg3_napi_disable(tp);
	/* Make sure no reset work races with the teardown below. */
	tg3_reset_task_cancel(tp);

	netif_tx_stop_all_queues(dev);

	del_timer_sync(&tp->timer);

	tg3_phy_stop(tp);

	tg3_full_lock(tp, 1);

	tg3_disable_ints(tp);

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_free_rings(tp);
	tg3_flag_clear(tp, INIT_COMPLETE);

	tg3_full_unlock(tp);

	/* Release IRQ vectors in reverse order of allocation. */
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	/* Clear stats across close / open calls */
	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));

	tg3_napi_fini(tp);

	tg3_free_consistent(tp);

	tg3_power_down(tp);

	netif_carrier_off(tp->dev);

	return 0;
}
10118
10119 static inline u64 get_stat64(tg3_stat64_t *val)
10120 {
10121        return ((u64)val->high << 32) | ((u64)val->low);
10122 }
10123
/* Return the cumulative RX CRC error count.
 *
 * For 5700/5701 devices with a copper PHY, the count is read from the
 * PHY's own CRC counter (enabled via MII_TG3_TEST1, read from the
 * MII_TG3_RXR_COUNTERS register) and accumulated into
 * tp->phy_crc_errors under tp->lock.  All other configurations use
 * the MAC's rx_fcs_errors statistics counter.
 */
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
			/* Enable the PHY CRC counter, then read it. */
			tg3_writephy(tp, MII_TG3_TEST1,
				     val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
10149
/* Fold one hardware counter into the cumulative ethtool statistics:
 * the result is the pre-reset snapshot (old_estats) plus the current
 * 64-bit hardware count.  Expects `estats', `old_estats' and
 * `hw_stats' to be in scope at the expansion site.
 */
#define ESTAT_ADD(member) \
	estats->member =	old_estats->member + \
				get_stat64(&hw_stats->member)
10153
/* Fill @estats with driver-lifetime ethtool statistics: the counters
 * snapshotted across the last chip reset (tp->estats_prev) plus the
 * current hardware statistics block.  If the hardware stats block is
 * not mapped, the previous snapshot is returned unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
                                               struct tg3_ethtool_stats *estats)
{
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_estats;

        /* RX MAC counters. */
        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        /* TX MAC counters. */
        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        /* Receive-list placement state machine counters. */
        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        /* Send-data-initiator counters. */
        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        /* Host coalescing counters. */
        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        ESTAT_ADD(mbuf_lwm_thresh_hit);

        return estats;
}
10243
/* ndo_get_stats64 implementation: build the standard rtnl_link_stats64
 * from the pre-reset snapshot (tp->net_stats_prev) plus the live
 * hardware statistics block.  Several rtnl fields are synthesized from
 * more than one hardware counter.  If the hardware stats block is not
 * mapped, the previous snapshot is returned unchanged.
 */
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
                                                 struct rtnl_link_stats64 *stats)
{
        struct tg3 *tp = netdev_priv(dev);
        struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_stats;

        /* Packet totals are the sum of the per-cast-type counters. */
        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_over_errors = old_stats->rx_over_errors +
                get_stat64(&hw_stats->rxbds_empty);
        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* CRC errors may come from a PHY register rather than the
         * MAC counter; calc_crc_errors() hides that distinction.
         */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        /* Software drop counters are maintained by the driver itself. */
        stats->rx_dropped = tp->rx_dropped;
        stats->tx_dropped = tp->tx_dropped;

        return stats;
}
10306
/* ethtool get_regs_len: size of the register dump produced by
 * tg3_get_regs(), fixed for all chip revisions.
 */
static int tg3_get_regs_len(struct net_device *dev)
{
        return TG3_REG_BLK_SIZE;
}
10311
10312 static void tg3_get_regs(struct net_device *dev,
10313                 struct ethtool_regs *regs, void *_p)
10314 {
10315         struct tg3 *tp = netdev_priv(dev);
10316
10317         regs->version = 0;
10318
10319         memset(_p, 0, TG3_REG_BLK_SIZE);
10320
10321         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10322                 return;
10323
10324         tg3_full_lock(tp, 0);
10325
10326         tg3_dump_legacy_regs(tp, (u32 *)_p);
10327
10328         tg3_full_unlock(tp);
10329 }
10330
/* ethtool get_eeprom_len: report the NVRAM size probed at init time. */
static int tg3_get_eeprom_len(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        return tp->nvram_size;
}
10337
10338 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10339 {
10340         struct tg3 *tp = netdev_priv(dev);
10341         int ret;
10342         u8  *pd;
10343         u32 i, offset, len, b_offset, b_count;
10344         __be32 val;
10345
10346         if (tg3_flag(tp, NO_NVRAM))
10347                 return -EINVAL;
10348
10349         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10350                 return -EAGAIN;
10351
10352         offset = eeprom->offset;
10353         len = eeprom->len;
10354         eeprom->len = 0;
10355
10356         eeprom->magic = TG3_EEPROM_MAGIC;
10357
10358         if (offset & 3) {
10359                 /* adjustments to start on required 4 byte boundary */
10360                 b_offset = offset & 3;
10361                 b_count = 4 - b_offset;
10362                 if (b_count > len) {
10363                         /* i.e. offset=1 len=2 */
10364                         b_count = len;
10365                 }
10366                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10367                 if (ret)
10368                         return ret;
10369                 memcpy(data, ((char *)&val) + b_offset, b_count);
10370                 len -= b_count;
10371                 offset += b_count;
10372                 eeprom->len += b_count;
10373         }
10374
10375         /* read bytes up to the last 4 byte boundary */
10376         pd = &data[eeprom->len];
10377         for (i = 0; i < (len - (len & 3)); i += 4) {
10378                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10379                 if (ret) {
10380                         eeprom->len += i;
10381                         return ret;
10382                 }
10383                 memcpy(pd + i, &val, 4);
10384         }
10385         eeprom->len += i;
10386
10387         if (len & 3) {
10388                 /* read last bytes not ending on 4 byte boundary */
10389                 pd = &data[eeprom->len];
10390                 b_count = len & 3;
10391                 b_offset = offset + len - b_count;
10392                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10393                 if (ret)
10394                         return ret;
10395                 memcpy(pd, &val, b_count);
10396                 eeprom->len += b_count;
10397         }
10398         return 0;
10399 }
10400
10401 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10402 {
10403         struct tg3 *tp = netdev_priv(dev);
10404         int ret;
10405         u32 offset, len, b_offset, odd_len;
10406         u8 *buf;
10407         __be32 start, end;
10408
10409         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10410                 return -EAGAIN;
10411
10412         if (tg3_flag(tp, NO_NVRAM) ||
10413             eeprom->magic != TG3_EEPROM_MAGIC)
10414                 return -EINVAL;
10415
10416         offset = eeprom->offset;
10417         len = eeprom->len;
10418
10419         if ((b_offset = (offset & 3))) {
10420                 /* adjustments to start on required 4 byte boundary */
10421                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10422                 if (ret)
10423                         return ret;
10424                 len += b_offset;
10425                 offset &= ~3;
10426                 if (len < 4)
10427                         len = 4;
10428         }
10429
10430         odd_len = 0;
10431         if (len & 3) {
10432                 /* adjustments to end on required 4 byte boundary */
10433                 odd_len = 1;
10434                 len = (len + 3) & ~3;
10435                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10436                 if (ret)
10437                         return ret;
10438         }
10439
10440         buf = data;
10441         if (b_offset || odd_len) {
10442                 buf = kmalloc(len, GFP_KERNEL);
10443                 if (!buf)
10444                         return -ENOMEM;
10445                 if (b_offset)
10446                         memcpy(buf, &start, 4);
10447                 if (odd_len)
10448                         memcpy(buf+len-4, &end, 4);
10449                 memcpy(buf + b_offset, data, eeprom->len);
10450         }
10451
10452         ret = tg3_nvram_write_block(tp, offset, len, buf);
10453
10454         if (buf != data)
10455                 kfree(buf);
10456
10457         return ret;
10458 }
10459
/* ethtool get_settings: report supported/advertised link modes and the
 * current link state.  Delegates entirely to phylib when it manages
 * the PHY.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct tg3 *tp = netdev_priv(dev);

        if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_ethtool_gset(phydev, cmd);
        }

        cmd->supported = (SUPPORTED_Autoneg);

        /* Gigabit modes, unless the PHY is 10/100-only. */
        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                cmd->supported |= (SUPPORTED_1000baseT_Half |
                                   SUPPORTED_1000baseT_Full);

        /* Copper PHYs add the 10/100 twisted-pair modes; SerDes parts
         * report fibre only.
         */
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
                cmd->supported |= (SUPPORTED_100baseT_Half |
                                  SUPPORTED_100baseT_Full |
                                  SUPPORTED_10baseT_Half |
                                  SUPPORTED_10baseT_Full |
                                  SUPPORTED_TP);
                cmd->port = PORT_TP;
        } else {
                cmd->supported |= SUPPORTED_FIBRE;
                cmd->port = PORT_FIBRE;
        }

        cmd->advertising = tp->link_config.advertising;
        if (tg3_flag(tp, PAUSE_AUTONEG)) {
                /* Map FLOW_CTRL_RX/TX onto the standard Pause /
                 * Asym_Pause advertisement bit combinations.
                 */
                if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
                        if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
                                cmd->advertising |= ADVERTISED_Pause;
                        } else {
                                cmd->advertising |= ADVERTISED_Pause |
                                                    ADVERTISED_Asym_Pause;
                        }
                } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
                        cmd->advertising |= ADVERTISED_Asym_Pause;
                }
        }
        if (netif_running(dev) && netif_carrier_ok(dev)) {
                /* Link is up: report the negotiated speed, duplex,
                 * partner advertisement and MDI-X state.
                 */
                ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
                cmd->duplex = tp->link_config.active_duplex;
                cmd->lp_advertising = tp->link_config.rmt_adv;
                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
                        if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
                                cmd->eth_tp_mdix = ETH_TP_MDI_X;
                        else
                                cmd->eth_tp_mdix = ETH_TP_MDI;
                }
        } else {
                ethtool_cmd_speed_set(cmd, SPEED_INVALID);
                cmd->duplex = DUPLEX_INVALID;
                cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
        }
        cmd->phy_address = tp->phy_addr;
        cmd->transceiver = XCVR_INTERNAL;
        cmd->autoneg = tp->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}
10525
/* ethtool set_settings: validate and apply a new link configuration.
 * Delegates to phylib when it manages the PHY.  Returns -EINVAL for
 * advertisement bits or forced speed/duplex combinations the hardware
 * cannot do.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 speed = ethtool_cmd_speed(cmd);

        if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_ethtool_sset(phydev, cmd);
        }

        if (cmd->autoneg != AUTONEG_ENABLE &&
            cmd->autoneg != AUTONEG_DISABLE)
                return -EINVAL;

        /* A forced link must specify a definite duplex. */
        if (cmd->autoneg == AUTONEG_DISABLE &&
            cmd->duplex != DUPLEX_FULL &&
            cmd->duplex != DUPLEX_HALF)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                /* Build the mask of modes this PHY can advertise and
                 * reject any request outside it.
                 */
                u32 mask = ADVERTISED_Autoneg |
                           ADVERTISED_Pause |
                           ADVERTISED_Asym_Pause;

                if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                        mask |= ADVERTISED_1000baseT_Half |
                                ADVERTISED_1000baseT_Full;

                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                        mask |= ADVERTISED_100baseT_Half |
                                ADVERTISED_100baseT_Full |
                                ADVERTISED_10baseT_Half |
                                ADVERTISED_10baseT_Full |
                                ADVERTISED_TP;
                else
                        mask |= ADVERTISED_FIBRE;

                if (cmd->advertising & ~mask)
                        return -EINVAL;

                /* Keep only the speed/duplex bits in the stored
                 * advertisement; autoneg/pause/port bits are managed
                 * by the driver.
                 */
                mask &= (ADVERTISED_1000baseT_Half |
                         ADVERTISED_1000baseT_Full |
                         ADVERTISED_100baseT_Half |
                         ADVERTISED_100baseT_Full |
                         ADVERTISED_10baseT_Half |
                         ADVERTISED_10baseT_Full);

                cmd->advertising &= mask;
        } else {
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
                        /* SerDes links only support forced 1000/full. */
                        if (speed != SPEED_1000)
                                return -EINVAL;

                        if (cmd->duplex != DUPLEX_FULL)
                                return -EINVAL;
                } else {
                        if (speed != SPEED_100 &&
                            speed != SPEED_10)
                                return -EINVAL;
                }
        }

        tg3_full_lock(tp, 0);

        tp->link_config.autoneg = cmd->autoneg;
        if (cmd->autoneg == AUTONEG_ENABLE) {
                tp->link_config.advertising = (cmd->advertising |
                                              ADVERTISED_Autoneg);
                tp->link_config.speed = SPEED_INVALID;
                tp->link_config.duplex = DUPLEX_INVALID;
        } else {
                tp->link_config.advertising = 0;
                tp->link_config.speed = speed;
                tp->link_config.duplex = cmd->duplex;
        }

        /* Remember the requested configuration so it survives resets
         * and power transitions.
         */
        tp->link_config.orig_speed = tp->link_config.speed;
        tp->link_config.orig_duplex = tp->link_config.duplex;
        tp->link_config.orig_autoneg = tp->link_config.autoneg;

        if (netif_running(dev))
                tg3_setup_phy(tp, 1);

        tg3_full_unlock(tp);

        return 0;
}
10616
/* ethtool get_drvinfo: identify driver, firmware and PCI location. */
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct tg3 *tp = netdev_priv(dev);

        strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
        strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
        strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
10626
10627 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10628 {
10629         struct tg3 *tp = netdev_priv(dev);
10630
10631         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10632                 wol->supported = WAKE_MAGIC;
10633         else
10634                 wol->supported = 0;
10635         wol->wolopts = 0;
10636         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10637                 wol->wolopts = WAKE_MAGIC;
10638         memset(&wol->sopass, 0, sizeof(wol->sopass));
10639 }
10640
10641 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10642 {
10643         struct tg3 *tp = netdev_priv(dev);
10644         struct device *dp = &tp->pdev->dev;
10645
10646         if (wol->wolopts & ~WAKE_MAGIC)
10647                 return -EINVAL;
10648         if ((wol->wolopts & WAKE_MAGIC) &&
10649             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10650                 return -EINVAL;
10651
10652         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10653
10654         spin_lock_bh(&tp->lock);
10655         if (device_may_wakeup(dp))
10656                 tg3_flag_set(tp, WOL_ENABLE);
10657         else
10658                 tg3_flag_clear(tp, WOL_ENABLE);
10659         spin_unlock_bh(&tp->lock);
10660
10661         return 0;
10662 }
10663
/* ethtool get_msglevel: return the netif message-enable bitmask. */
static u32 tg3_get_msglevel(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        return tp->msg_enable;
}
10669
/* ethtool set_msglevel: store a new netif message-enable bitmask. */
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
        struct tg3 *tp = netdev_priv(dev);
        tp->msg_enable = value;
}
10675
/* ethtool nway_reset: restart autonegotiation.  Only valid while the
 * interface is up and the PHY is not a SerDes.  Delegates to phylib
 * when it manages the PHY.
 */
static int tg3_nway_reset(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int r;

        if (!netif_running(dev))
                return -EAGAIN;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                return -EINVAL;

        if (tg3_flag(tp, USE_PHYLIB)) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
        } else {
                u32 bmcr;

                spin_lock_bh(&tp->lock);
                r = -EINVAL;
                /* NOTE(review): BMCR is read twice; the first read
                 * looks redundant but may be flushing a stale latched
                 * value - confirm against PHY errata before removing.
                 */
                tg3_readphy(tp, MII_BMCR, &bmcr);
                if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
                    ((bmcr & BMCR_ANENABLE) ||
                     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
                        /* Restart (and force-enable) autonegotiation. */
                        tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
                                                   BMCR_ANENABLE);
                        r = 0;
                }
                spin_unlock_bh(&tp->lock);
        }

        return r;
}
10709
10710 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10711 {
10712         struct tg3 *tp = netdev_priv(dev);
10713
10714         ering->rx_max_pending = tp->rx_std_ring_mask;
10715         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10716                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10717         else
10718                 ering->rx_jumbo_max_pending = 0;
10719
10720         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10721
10722         ering->rx_pending = tp->rx_pending;
10723         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10724                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10725         else
10726                 ering->rx_jumbo_pending = 0;
10727
10728         ering->tx_pending = tp->napi[0].tx_pending;
10729 }
10730
/* ethtool set_ringparam: resize the RX / jumbo RX / TX rings.  If the
 * interface is running the hardware is halted, reconfigured and
 * restarted around the change.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        struct tg3 *tp = netdev_priv(dev);
        int i, irq_sync = 0, err = 0;

        /* Bounds check; TX must also leave room for a maximally
         * fragmented skb (three of them on TSO_BUG chips).
         */
        if ((ering->rx_pending > tp->rx_std_ring_mask) ||
            (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
            (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
            (ering->tx_pending <= MAX_SKB_FRAGS) ||
            (tg3_flag(tp, TSO_BUG) &&
             (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
                return -EINVAL;

        if (netif_running(dev)) {
                tg3_phy_stop(tp);
                tg3_netif_stop(tp);
                irq_sync = 1;
        }

        tg3_full_lock(tp, irq_sync);

        tp->rx_pending = ering->rx_pending;

        /* Some chips cap the standard RX ring at 64 entries. */
        if (tg3_flag(tp, MAX_RXPEND_64) &&
            tp->rx_pending > 63)
                tp->rx_pending = 63;
        tp->rx_jumbo_pending = ering->rx_jumbo_pending;

        /* Every TX queue gets the same ring size. */
        for (i = 0; i < tp->irq_max; i++)
                tp->napi[i].tx_pending = ering->tx_pending;

        if (netif_running(dev)) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 1);
                if (!err)
                        tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);

        if (irq_sync && !err)
                tg3_phy_start(tp);

        return err;
}
10776
10777 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10778 {
10779         struct tg3 *tp = netdev_priv(dev);
10780
10781         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10782
10783         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10784                 epause->rx_pause = 1;
10785         else
10786                 epause->rx_pause = 0;
10787
10788         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10789                 epause->tx_pause = 1;
10790         else
10791                 epause->tx_pause = 0;
10792 }
10793
/* ethtool set_pauseparam: configure RX/TX flow control.  With phylib,
 * the pause advertisement is updated and autonegotiation restarted so
 * the partner learns the new setting; otherwise the hardware is
 * halted and restarted with the new configuration.
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
        struct tg3 *tp = netdev_priv(dev);
        int err = 0;

        if (tg3_flag(tp, USE_PHYLIB)) {
                u32 newadv;
                struct phy_device *phydev;

                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

                /* Asymmetric pause requires the PHY to support it. */
                if (!(phydev->supported & SUPPORTED_Pause) ||
                    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
                     (epause->rx_pause != epause->tx_pause)))
                        return -EINVAL;

                /* Translate the rx/tx request into FLOW_CTRL_* flags
                 * and the matching pause advertisement bits.
                 */
                tp->link_config.flowctrl = 0;
                if (epause->rx_pause) {
                        tp->link_config.flowctrl |= FLOW_CTRL_RX;

                        if (epause->tx_pause) {
                                tp->link_config.flowctrl |= FLOW_CTRL_TX;
                                newadv = ADVERTISED_Pause;
                        } else
                                newadv = ADVERTISED_Pause |
                                         ADVERTISED_Asym_Pause;
                } else if (epause->tx_pause) {
                        tp->link_config.flowctrl |= FLOW_CTRL_TX;
                        newadv = ADVERTISED_Asym_Pause;
                } else
                        newadv = 0;

                if (epause->autoneg)
                        tg3_flag_set(tp, PAUSE_AUTONEG);
                else
                        tg3_flag_clear(tp, PAUSE_AUTONEG);

                if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                        u32 oldadv = phydev->advertising &
                                     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
                        if (oldadv != newadv) {
                                phydev->advertising &=
                                        ~(ADVERTISED_Pause |
                                          ADVERTISED_Asym_Pause);
                                phydev->advertising |= newadv;
                                if (phydev->autoneg) {
                                        /*
                                         * Always renegotiate the link to
                                         * inform our link partner of our
                                         * flow control settings, even if the
                                         * flow control is forced.  Let
                                         * tg3_adjust_link() do the final
                                         * flow control setup.
                                         */
                                        return phy_start_aneg(phydev);
                                }
                        }

                        if (!epause->autoneg)
                                tg3_setup_flow_control(tp, 0, 0);
                } else {
                        /* PHY not attached yet: just record the
                         * advertisement for when it connects.
                         */
                        tp->link_config.orig_advertising &=
                                        ~(ADVERTISED_Pause |
                                          ADVERTISED_Asym_Pause);
                        tp->link_config.orig_advertising |= newadv;
                }
        } else {
                int irq_sync = 0;

                if (netif_running(dev)) {
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);

                if (epause->autoneg)
                        tg3_flag_set(tp, PAUSE_AUTONEG);
                else
                        tg3_flag_clear(tp, PAUSE_AUTONEG);
                if (epause->rx_pause)
                        tp->link_config.flowctrl |= FLOW_CTRL_RX;
                else
                        tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
                if (epause->tx_pause)
                        tp->link_config.flowctrl |= FLOW_CTRL_TX;
                else
                        tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

                /* Apply by halting and restarting the hardware. */
                if (netif_running(dev)) {
                        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                        err = tg3_restart_hw(tp, 1);
                        if (!err)
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);
        }

        return err;
}
10895
10896 static int tg3_get_sset_count(struct net_device *dev, int sset)
10897 {
10898         switch (sset) {
10899         case ETH_SS_TEST:
10900                 return TG3_NUM_TEST;
10901         case ETH_SS_STATS:
10902                 return TG3_NUM_STATS;
10903         default:
10904                 return -EOPNOTSUPP;
10905         }
10906 }
10907
10908 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10909                          u32 *rules __always_unused)
10910 {
10911         struct tg3 *tp = netdev_priv(dev);
10912
10913         if (!tg3_flag(tp, SUPPORT_MSIX))
10914                 return -EOPNOTSUPP;
10915
10916         switch (info->cmd) {
10917         case ETHTOOL_GRXRINGS:
10918                 if (netif_running(tp->dev))
10919                         info->data = tp->irq_cnt;
10920                 else {
10921                         info->data = num_online_cpus();
10922                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
10923                                 info->data = TG3_IRQ_MAX_VECS_RSS;
10924                 }
10925
10926                 /* The first interrupt vector only
10927                  * handles link interrupts.
10928                  */
10929                 info->data -= 1;
10930                 return 0;
10931
10932         default:
10933                 return -EOPNOTSUPP;
10934         }
10935 }
10936
10937 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10938 {
10939         u32 size = 0;
10940         struct tg3 *tp = netdev_priv(dev);
10941
10942         if (tg3_flag(tp, SUPPORT_MSIX))
10943                 size = TG3_RSS_INDIR_TBL_SIZE;
10944
10945         return size;
10946 }
10947
10948 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10949 {
10950         struct tg3 *tp = netdev_priv(dev);
10951         int i;
10952
10953         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10954                 indir[i] = tp->rss_ind_tbl[i];
10955
10956         return 0;
10957 }
10958
10959 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10960 {
10961         struct tg3 *tp = netdev_priv(dev);
10962         size_t i;
10963
10964         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10965                 tp->rss_ind_tbl[i] = indir[i];
10966
10967         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10968                 return 0;
10969
10970         /* It is legal to write the indirection
10971          * table while the device is running.
10972          */
10973         tg3_full_lock(tp, 0);
10974         tg3_rss_write_indir_tbl(tp);
10975         tg3_full_unlock(tp);
10976
10977         return 0;
10978 }
10979
10980 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10981 {
10982         switch (stringset) {
10983         case ETH_SS_STATS:
10984                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10985                 break;
10986         case ETH_SS_TEST:
10987                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10988                 break;
10989         default:
10990                 WARN_ON(1);     /* we need a WARN() */
10991                 break;
10992         }
10993 }
10994
/* ethtool set_phys_id: blink the port LEDs so an operator can locate
 * the adapter.  Returning 1 from ID_ACTIVE asks the ethtool core to
 * call ID_ON / ID_OFF once per second; ID_INACTIVE restores the
 * normal LED control value.
 */
static int tg3_set_phys_id(struct net_device *dev,
                            enum ethtool_phys_id_state state)
{
        struct tg3 *tp = netdev_priv(dev);

        if (!netif_running(tp->dev))
                return -EAGAIN;

        switch (state) {
        case ETHTOOL_ID_ACTIVE:
                return 1;       /* cycle on/off once per second */

        case ETHTOOL_ID_ON:
                /* Override link/traffic control and force all LEDs on. */
                tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                     LED_CTRL_1000MBPS_ON |
                     LED_CTRL_100MBPS_ON |
                     LED_CTRL_10MBPS_ON |
                     LED_CTRL_TRAFFIC_OVERRIDE |
                     LED_CTRL_TRAFFIC_BLINK |
                     LED_CTRL_TRAFFIC_LED);
                break;

        case ETHTOOL_ID_OFF:
                /* Override with everything off. */
                tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
                     LED_CTRL_TRAFFIC_OVERRIDE);
                break;

        case ETHTOOL_ID_INACTIVE:
                /* Restore the saved LED configuration. */
                tw32(MAC_LED_CTRL, tp->led_ctrl);
                break;
        }

        return 0;
}
11029
11030 static void tg3_get_ethtool_stats(struct net_device *dev,
11031                                    struct ethtool_stats *estats, u64 *tmp_stats)
11032 {
11033         struct tg3 *tp = netdev_priv(dev);
11034
11035         tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11036 }
11037
/* Read the adapter's VPD (Vital Product Data) block into a freshly
 * kmalloc()ed buffer.
 *
 * For legacy EEPROM images the NVRAM directory is searched for an
 * extended-VPD entry; if none is found (or the image is not legacy
 * format) the fixed default VPD offset/length are used.  On devices
 * without usable NVRAM the data is fetched through PCI config space
 * via pci_read_vpd() instead.
 *
 * Returns the buffer (caller must kfree()) with its byte length
 * stored in *vpdlen, or NULL on any failure.
 */
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Walk the NVRAM directory looking for an extended-VPD
		 * directory entry.
		 */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Entry found: length field is in 32-bit words,
			 * the data address is in the following word.
			 */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended-VPD entry; use the fixed default window. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		/* No legacy NVRAM image: pull the VPD through PCI
		 * config space, retrying interrupted or timed-out
		 * reads (treated as zero-byte progress) at most
		 * three times.
		 */
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	*vpdlen = len;

	return buf;

error:
	kfree(buf);
	return NULL;
}
11113
/* Byte counts of the NVRAM region that tg3_test_nvram() reads and
 * checksums, per image format / selfboot format-1 revision.
 */
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11123
/* Ethtool self-test: validate the NVRAM image.
 *
 * The magic word at offset 0 selects the image format and how much of
 * the image to read.  Validation then depends on the format:
 *   - selfboot firmware images: 8-bit byte-sum must be zero,
 *   - selfboot hardware images: per-byte parity bits must check out,
 *   - legacy EEPROM images: CRC over the bootstrap and manufacturing
 *     blocks, plus the VPD read-only section's checksum keyword.
 * Returns 0 on success, -EIO on a read error or checksum mismatch,
 * -ENOMEM if a buffer cannot be allocated.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic, len;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Derive the number of bytes to read from the image format. */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_4:
				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_5:
				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_6:
				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
				break;
			default:
				return -EIO;
			}
		} else
			/* Unknown selfboot format: nothing to verify. */
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	/* Read the whole image, preserving NVRAM byte order. */
	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image's bytes sum to zero mod 256. */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each carry 7 parity bits
				 * (MSB first).
				 */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* Byte 16 carries 6 parity bits, byte 17
				 * another 8.
				 */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte together with its parity bit must have
		 * an odd number of set bits.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Legacy image: also verify the VPD checksum keyword (RV). */
	buf = tg3_vpd_readblock(tp, &len);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* Bytes 0..j inclusive (covering the stored
			 * checksum byte itself) must sum to zero.
			 */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
11302
11303 #define TG3_SERDES_TIMEOUT_SEC  2
11304 #define TG3_COPPER_TIMEOUT_SEC  6
11305
11306 static int tg3_test_link(struct tg3 *tp)
11307 {
11308         int i, max;
11309
11310         if (!netif_running(tp->dev))
11311                 return -ENODEV;
11312
11313         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11314                 max = TG3_SERDES_TIMEOUT_SEC;
11315         else
11316                 max = TG3_COPPER_TIMEOUT_SEC;
11317
11318         for (i = 0; i < max; i++) {
11319                 if (netif_carrier_ok(tp->dev))
11320                         return 0;
11321
11322                 if (msleep_interruptible(1000))
11323                         break;
11324         }
11325
11326         return -EIO;
11327 }
11328
/* Only test the commonly used registers */

/* Ethtool self-test: exercise a table of MAC/DMA/host-coalescing
 * registers.  For each applicable entry the original value is saved,
 * then all-zeros and then all-ones are written; the read-only bits
 * (read_mask) must be unaffected and the read/write bits (write_mask)
 * must take the written value.  The original value is restored before
 * moving on.  Returns 0 on success, -EIO on the first mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
/* Entry applicability flags: which ASIC families the entry is
 * valid (or invalid) for.
 */
#define TG3_FL_5705     0x1
#define TG3_FL_NOT_5705 0x2
#define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750 0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		/* Table terminator. */
		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries not applicable to this ASIC family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
11549
11550 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11551 {
11552         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11553         int i;
11554         u32 j;
11555
11556         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11557                 for (j = 0; j < len; j += 4) {
11558                         u32 val;
11559
11560                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11561                         tg3_read_mem(tp, offset + j, &val);
11562                         if (val != test_pattern[i])
11563                                 return -EIO;
11564                 }
11565         }
11566         return 0;
11567 }
11568
11569 static int tg3_test_memory(struct tg3 *tp)
11570 {
11571         static struct mem_entry {
11572                 u32 offset;
11573                 u32 len;
11574         } mem_tbl_570x[] = {
11575                 { 0x00000000, 0x00b50},
11576                 { 0x00002000, 0x1c000},
11577                 { 0xffffffff, 0x00000}
11578         }, mem_tbl_5705[] = {
11579                 { 0x00000100, 0x0000c},
11580                 { 0x00000200, 0x00008},
11581                 { 0x00004000, 0x00800},
11582                 { 0x00006000, 0x01000},
11583                 { 0x00008000, 0x02000},
11584                 { 0x00010000, 0x0e000},
11585                 { 0xffffffff, 0x00000}
11586         }, mem_tbl_5755[] = {
11587                 { 0x00000200, 0x00008},
11588                 { 0x00004000, 0x00800},
11589                 { 0x00006000, 0x00800},
11590                 { 0x00008000, 0x02000},
11591                 { 0x00010000, 0x0c000},
11592                 { 0xffffffff, 0x00000}
11593         }, mem_tbl_5906[] = {
11594                 { 0x00000200, 0x00008},
11595                 { 0x00004000, 0x00400},
11596                 { 0x00006000, 0x00400},
11597                 { 0x00008000, 0x01000},
11598                 { 0x00010000, 0x01000},
11599                 { 0xffffffff, 0x00000}
11600         }, mem_tbl_5717[] = {
11601                 { 0x00000200, 0x00008},
11602                 { 0x00010000, 0x0a000},
11603                 { 0x00020000, 0x13c00},
11604                 { 0xffffffff, 0x00000}
11605         }, mem_tbl_57765[] = {
11606                 { 0x00000200, 0x00008},
11607                 { 0x00004000, 0x00800},
11608                 { 0x00006000, 0x09800},
11609                 { 0x00010000, 0x0a000},
11610                 { 0xffffffff, 0x00000}
11611         };
11612         struct mem_entry *mem_tbl;
11613         int err = 0;
11614         int i;
11615
11616         if (tg3_flag(tp, 5717_PLUS))
11617                 mem_tbl = mem_tbl_5717;
11618         else if (tg3_flag(tp, 57765_CLASS))
11619                 mem_tbl = mem_tbl_57765;
11620         else if (tg3_flag(tp, 5755_PLUS))
11621                 mem_tbl = mem_tbl_5755;
11622         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11623                 mem_tbl = mem_tbl_5906;
11624         else if (tg3_flag(tp, 5705_PLUS))
11625                 mem_tbl = mem_tbl_5705;
11626         else
11627                 mem_tbl = mem_tbl_570x;
11628
11629         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11630                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11631                 if (err)
11632                         break;
11633         }
11634
11635         return err;
11636 }
11637
/* Parameters of the canned packet used by the TSO loopback test. */
#define TG3_TSO_MSS		500

#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

/* Header template for the TSO loopback test packet: Ethernet type
 * field (0x0800 = IPv4), an IPv4 header (10.0.0.1 -> 10.0.0.2,
 * protocol 6 = TCP), and a TCP header with a 12-byte options block
 * (two NOPs plus a timestamp option).  The IP total-length and TCP
 * checksum fields are left zero here and patched up by
 * tg3_run_loopback() before transmission.
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
11660
11661 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11662 {
11663         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11664         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11665         u32 budget;
11666         struct sk_buff *skb;
11667         u8 *tx_data, *rx_data;
11668         dma_addr_t map;
11669         int num_pkts, tx_len, rx_len, i, err;
11670         struct tg3_rx_buffer_desc *desc;
11671         struct tg3_napi *tnapi, *rnapi;
11672         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11673
11674         tnapi = &tp->napi[0];
11675         rnapi = &tp->napi[0];
11676         if (tp->irq_cnt > 1) {
11677                 if (tg3_flag(tp, ENABLE_RSS))
11678                         rnapi = &tp->napi[1];
11679                 if (tg3_flag(tp, ENABLE_TSS))
11680                         tnapi = &tp->napi[1];
11681         }
11682         coal_now = tnapi->coal_now | rnapi->coal_now;
11683
11684         err = -EIO;
11685
11686         tx_len = pktsz;
11687         skb = netdev_alloc_skb(tp->dev, tx_len);
11688         if (!skb)
11689                 return -ENOMEM;
11690
11691         tx_data = skb_put(skb, tx_len);
11692         memcpy(tx_data, tp->dev->dev_addr, 6);
11693         memset(tx_data + 6, 0x0, 8);
11694
11695         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11696
11697         if (tso_loopback) {
11698                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11699
11700                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11701                               TG3_TSO_TCP_OPT_LEN;
11702
11703                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11704                        sizeof(tg3_tso_header));
11705                 mss = TG3_TSO_MSS;
11706
11707                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11708                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11709
11710                 /* Set the total length field in the IP header */
11711                 iph->tot_len = htons((u16)(mss + hdr_len));
11712
11713                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11714                               TXD_FLAG_CPU_POST_DMA);
11715
11716                 if (tg3_flag(tp, HW_TSO_1) ||
11717                     tg3_flag(tp, HW_TSO_2) ||
11718                     tg3_flag(tp, HW_TSO_3)) {
11719                         struct tcphdr *th;
11720                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11721                         th = (struct tcphdr *)&tx_data[val];
11722                         th->check = 0;
11723                 } else
11724                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11725
11726                 if (tg3_flag(tp, HW_TSO_3)) {
11727                         mss |= (hdr_len & 0xc) << 12;
11728                         if (hdr_len & 0x10)
11729                                 base_flags |= 0x00000010;
11730                         base_flags |= (hdr_len & 0x3e0) << 5;
11731                 } else if (tg3_flag(tp, HW_TSO_2))
11732                         mss |= hdr_len << 9;
11733                 else if (tg3_flag(tp, HW_TSO_1) ||
11734                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11735                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11736                 } else {
11737                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11738                 }
11739
11740                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11741         } else {
11742                 num_pkts = 1;
11743                 data_off = ETH_HLEN;
11744         }
11745
11746         for (i = data_off; i < tx_len; i++)
11747                 tx_data[i] = (u8) (i & 0xff);
11748
11749         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11750         if (pci_dma_mapping_error(tp->pdev, map)) {
11751                 dev_kfree_skb(skb);
11752                 return -EIO;
11753         }
11754
11755         val = tnapi->tx_prod;
11756         tnapi->tx_buffers[val].skb = skb;
11757         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11758
11759         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11760                rnapi->coal_now);
11761
11762         udelay(10);
11763
11764         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11765
11766         budget = tg3_tx_avail(tnapi);
11767         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11768                             base_flags | TXD_FLAG_END, mss, 0)) {
11769                 tnapi->tx_buffers[val].skb = NULL;
11770                 dev_kfree_skb(skb);
11771                 return -EIO;
11772         }
11773
11774         tnapi->tx_prod++;
11775
11776         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11777         tr32_mailbox(tnapi->prodmbox);
11778
11779         udelay(10);
11780
11781         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11782         for (i = 0; i < 35; i++) {
11783                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11784                        coal_now);
11785
11786                 udelay(10);
11787
11788                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11789                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11790                 if ((tx_idx == tnapi->tx_prod) &&
11791                     (rx_idx == (rx_start_idx + num_pkts)))
11792                         break;
11793         }
11794
11795         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11796         dev_kfree_skb(skb);
11797
11798         if (tx_idx != tnapi->tx_prod)
11799                 goto out;
11800
11801         if (rx_idx != rx_start_idx + num_pkts)
11802                 goto out;
11803
11804         val = data_off;
11805         while (rx_idx != rx_start_idx) {
11806                 desc = &rnapi->rx_rcb[rx_start_idx++];
11807                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11808                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11809
11810                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11811                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11812                         goto out;
11813
11814                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11815                          - ETH_FCS_LEN;
11816
11817                 if (!tso_loopback) {
11818                         if (rx_len != tx_len)
11819                                 goto out;
11820
11821                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11822                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11823                                         goto out;
11824                         } else {
11825                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11826                                         goto out;
11827                         }
11828                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11829                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11830                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11831                         goto out;
11832                 }
11833
11834                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11835                         rx_data = tpr->rx_std_buffers[desc_idx].data;
11836                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11837                                              mapping);
11838                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11839                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11840                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11841                                              mapping);
11842                 } else
11843                         goto out;
11844
11845                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11846                                             PCI_DMA_FROMDEVICE);
11847
11848                 rx_data += TG3_RX_OFFSET(tp);
11849                 for (i = data_off; i < rx_len; i++, val++) {
11850                         if (*(rx_data + i) != (u8) (val & 0xff))
11851                                 goto out;
11852                 }
11853         }
11854
11855         err = 0;
11856
11857         /* tg3_free_rings will unmap and free the rx_data */
11858 out:
11859         return err;
11860 }
11861
/* Per-mode failure bits OR'd into each tg3_test_loopback() result word;
 * a result word of zero means all variants of that loopback mode passed.
 */
#define TG3_STD_LOOPBACK_FAILED         1
#define TG3_JMB_LOOPBACK_FAILED         2
#define TG3_TSO_LOOPBACK_FAILED         4
#define TG3_LOOPBACK_FAILED \
        (TG3_STD_LOOPBACK_FAILED | \
         TG3_JMB_LOOPBACK_FAILED | \
         TG3_TSO_LOOPBACK_FAILED)

/* Run the MAC-internal, PHY-internal and (optionally) PHY-external
 * loopback tests.
 *
 * @tp:         driver/device state
 * @data:       three result words - data[0] MAC loopback, data[1] internal
 *              PHY loopback, data[2] external PHY loopback - each receiving
 *              a mask of TG3_*_LOOPBACK_FAILED bits
 * @do_extlpbk: also run the external PHY loopback pass into data[2]
 *
 * Returns 0 if every executed test passed, -EIO otherwise (including when
 * the interface is down or the hardware reset fails).
 */
static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
        int err = -EIO;
        u32 eee_cap;

        /* Mask off the EEE capability for the duration of the tests;
         * restored unconditionally at "done".
         */
        eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
        tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

        if (!netif_running(tp->dev)) {
                data[0] = TG3_LOOPBACK_FAILED;
                data[1] = TG3_LOOPBACK_FAILED;
                if (do_extlpbk)
                        data[2] = TG3_LOOPBACK_FAILED;
                goto done;
        }

        err = tg3_reset_hw(tp, 1);
        if (err) {
                data[0] = TG3_LOOPBACK_FAILED;
                data[1] = TG3_LOOPBACK_FAILED;
                if (do_extlpbk)
                        data[2] = TG3_LOOPBACK_FAILED;
                goto done;
        }

        if (tg3_flag(tp, ENABLE_RSS)) {
                int i;

                /* Reroute all rx packets to the 1st queue */
                for (i = MAC_RSS_INDIR_TBL_0;
                     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
                        tw32(i, 0x0);
        }

        /* HW errata - mac loopback fails in some cases on 5780.
         * Normal traffic and PHY loopback are not affected by
         * errata.  Also, the MAC loopback test is deprecated for
         * all newer ASIC revisions.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT)) {
                tg3_mac_loopback(tp, true);

                if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                        data[0] |= TG3_STD_LOOPBACK_FAILED;

                /* Jumbo-sized frame only when the jumbo ring is in use. */
                if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
                        data[0] |= TG3_JMB_LOOPBACK_FAILED;

                tg3_mac_loopback(tp, false);
        }

        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            !tg3_flag(tp, USE_PHYLIB)) {
                int i;

                tg3_phy_lpbk_set(tp, 0, false);

                /* Wait for link */
                for (i = 0; i < 100; i++) {
                        if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                                break;
                        mdelay(1);
                }

                if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                        data[1] |= TG3_STD_LOOPBACK_FAILED;
                if (tg3_flag(tp, TSO_CAPABLE) &&
                    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
                        data[1] |= TG3_TSO_LOOPBACK_FAILED;
                if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
                        data[1] |= TG3_JMB_LOOPBACK_FAILED;

                if (do_extlpbk) {
                        tg3_phy_lpbk_set(tp, 0, true);

                        /* All link indications report up, but the hardware
                         * isn't really ready for about 20 msec.  Double it
                         * to be sure.
                         */
                        mdelay(40);

                        if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                                data[2] |= TG3_STD_LOOPBACK_FAILED;
                        if (tg3_flag(tp, TSO_CAPABLE) &&
                            tg3_run_loopback(tp, ETH_FRAME_LEN, true))
                                data[2] |= TG3_TSO_LOOPBACK_FAILED;
                        if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                            tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
                                data[2] |= TG3_JMB_LOOPBACK_FAILED;
                }

                /* Re-enable gphy autopowerdown. */
                if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                        tg3_phy_toggle_apd(tp, true);
        }

        /* NOTE(review): data[2] is read even when do_extlpbk is false;
         * this relies on the caller having zeroed data[] beforehand -
         * confirm at the call sites (tg3_self_test memsets data to 0).
         */
        err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
        tp->phy_flags |= eee_cap;

        return err;
}
11976
/* ethtool self-test entry point.
 *
 * Result slots in @data: [0] NVRAM, [1] link, [2] registers, [3] memory,
 * [4..6] loopback (filled by tg3_test_loopback), [7] interrupt.  A nonzero
 * slot marks a failure and ETH_TEST_FL_FAILED is set in etest->flags.
 *
 * Offline tests halt the chip under tg3_full_lock and restart it afterwards;
 * online runs only perform the NVRAM and link checks.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

        /* Device must be powered up to test; if that fails, flag every
         * slot as failed and bail out.
         */
        if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
            tg3_power_up(tp)) {
                etest->flags |= ETH_TEST_FL_FAILED;
                memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
                return;
        }

        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

        if (tg3_test_nvram(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[0] = 1;
        }
        /* Skip the link check for external loopback - the cable is looped. */
        if (!doextlpbk && tg3_test_link(tp)) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[1] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int err, err2 = 0, irq_sync = 0;

                if (netif_running(dev)) {
                        tg3_phy_stop(tp);
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);

                /* Quiesce the chip and on-chip CPUs before poking at
                 * registers and memory.
                 */
                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!tg3_flag(tp, 5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);

                if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                        tg3_phy_reset(tp);

                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[2] = 1;
                }

                if (tg3_test_memory(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[3] = 1;
                }

                if (doextlpbk)
                        etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

                if (tg3_test_loopback(tp, &data[4], doextlpbk))
                        etest->flags |= ETH_TEST_FL_FAILED;

                tg3_full_unlock(tp);

                /* The interrupt test needs interrupts enabled, hence runs
                 * outside tg3_full_lock.
                 */
                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[7] = 1;
                }

                tg3_full_lock(tp, 0);

                /* Bring the device back to its pre-test operational state. */
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tg3_flag_set(tp, INIT_COMPLETE);
                        err2 = tg3_restart_hw(tp, 1);
                        if (!err2)
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);

                if (irq_sync && !err2)
                        tg3_phy_start(tp);
        }
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                tg3_power_down(tp);

}
12064
12065 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12066 {
12067         struct mii_ioctl_data *data = if_mii(ifr);
12068         struct tg3 *tp = netdev_priv(dev);
12069         int err;
12070
12071         if (tg3_flag(tp, USE_PHYLIB)) {
12072                 struct phy_device *phydev;
12073                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12074                         return -EAGAIN;
12075                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12076                 return phy_mii_ioctl(phydev, ifr, cmd);
12077         }
12078
12079         switch (cmd) {
12080         case SIOCGMIIPHY:
12081                 data->phy_id = tp->phy_addr;
12082
12083                 /* fallthru */
12084         case SIOCGMIIREG: {
12085                 u32 mii_regval;
12086
12087                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12088                         break;                  /* We have no PHY */
12089
12090                 if (!netif_running(dev))
12091                         return -EAGAIN;
12092
12093                 spin_lock_bh(&tp->lock);
12094                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12095                 spin_unlock_bh(&tp->lock);
12096
12097                 data->val_out = mii_regval;
12098
12099                 return err;
12100         }
12101
12102         case SIOCSMIIREG:
12103                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12104                         break;                  /* We have no PHY */
12105
12106                 if (!netif_running(dev))
12107                         return -EAGAIN;
12108
12109                 spin_lock_bh(&tp->lock);
12110                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12111                 spin_unlock_bh(&tp->lock);
12112
12113                 return err;
12114
12115         default:
12116                 /* do nothing */
12117                 break;
12118         }
12119         return -EOPNOTSUPP;
12120 }
12121
12122 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12123 {
12124         struct tg3 *tp = netdev_priv(dev);
12125
12126         memcpy(ec, &tp->coal, sizeof(*ec));
12127         return 0;
12128 }
12129
12130 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12131 {
12132         struct tg3 *tp = netdev_priv(dev);
12133         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12134         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12135
12136         if (!tg3_flag(tp, 5705_PLUS)) {
12137                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12138                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12139                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12140                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12141         }
12142
12143         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12144             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12145             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12146             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12147             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12148             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12149             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12150             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12151             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12152             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12153                 return -EINVAL;
12154
12155         /* No rx interrupts will be generated if both are zero */
12156         if ((ec->rx_coalesce_usecs == 0) &&
12157             (ec->rx_max_coalesced_frames == 0))
12158                 return -EINVAL;
12159
12160         /* No tx interrupts will be generated if both are zero */
12161         if ((ec->tx_coalesce_usecs == 0) &&
12162             (ec->tx_max_coalesced_frames == 0))
12163                 return -EINVAL;
12164
12165         /* Only copy relevant parameters, ignore all others. */
12166         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12167         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12168         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12169         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12170         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12171         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12172         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12173         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12174         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12175
12176         if (netif_running(dev)) {
12177                 tg3_full_lock(tp, 0);
12178                 __tg3_set_coalesce(tp, &tp->coal);
12179                 tg3_full_unlock(tp);
12180         }
12181         return 0;
12182 }
12183
/* ethtool operations table; hooked up via SET_ETHTOOL_OPS at probe time. */
static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .set_phys_id            = tg3_set_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
        .get_rxnfc              = tg3_get_rxnfc,
        .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
        .get_rxfh_indir         = tg3_get_rxfh_indir,
        .set_rxfh_indir         = tg3_set_rxfh_indir,
};
12215
/* ndo_set_rx_mode: refresh the hardware rx filters under the full lock.
 * A no-op while the interface is down; filters are programmed at open.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (netif_running(dev)) {
		tg3_full_lock(tp, 0);
		__tg3_set_rx_mode(dev);
		tg3_full_unlock(tp);
	}
}
12227
12228 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12229                                int new_mtu)
12230 {
12231         dev->mtu = new_mtu;
12232
12233         if (new_mtu > ETH_DATA_LEN) {
12234                 if (tg3_flag(tp, 5780_CLASS)) {
12235                         netdev_update_features(dev);
12236                         tg3_flag_clear(tp, TSO_CAPABLE);
12237                 } else {
12238                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12239                 }
12240         } else {
12241                 if (tg3_flag(tp, 5780_CLASS)) {
12242                         tg3_flag_set(tp, TSO_CAPABLE);
12243                         netdev_update_features(dev);
12244                 }
12245                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12246         }
12247 }
12248
12249 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12250 {
12251         struct tg3 *tp = netdev_priv(dev);
12252         int err;
12253
12254         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12255                 return -EINVAL;
12256
12257         if (!netif_running(dev)) {
12258                 /* We'll just catch it later when the
12259                  * device is up'd.
12260                  */
12261                 tg3_set_mtu(dev, tp, new_mtu);
12262                 return 0;
12263         }
12264
12265         tg3_phy_stop(tp);
12266
12267         tg3_netif_stop(tp);
12268
12269         tg3_full_lock(tp, 1);
12270
12271         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12272
12273         tg3_set_mtu(dev, tp, new_mtu);
12274
12275         err = tg3_restart_hw(tp, 0);
12276
12277         if (!err)
12278                 tg3_netif_start(tp);
12279
12280         tg3_full_unlock(tp);
12281
12282         if (!err)
12283                 tg3_phy_start(tp);
12284
12285         return err;
12286 }
12287
/* Network device operations table; installed on the netdev at probe time. */
static const struct net_device_ops tg3_netdev_ops = {
        .ndo_open               = tg3_open,
        .ndo_stop               = tg3_close,
        .ndo_start_xmit         = tg3_start_xmit,
        .ndo_get_stats64        = tg3_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_rx_mode        = tg3_set_rx_mode,
        .ndo_set_mac_address    = tg3_set_mac_addr,
        .ndo_do_ioctl           = tg3_ioctl,
        .ndo_tx_timeout         = tg3_tx_timeout,
        .ndo_change_mtu         = tg3_change_mtu,
        .ndo_fix_features       = tg3_fix_features,
        .ndo_set_features       = tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = tg3_poll_controller,
#endif
};
12305
12306 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12307 {
12308         u32 cursize, val, magic;
12309
12310         tp->nvram_size = EEPROM_CHIP_SIZE;
12311
12312         if (tg3_nvram_read(tp, 0, &magic) != 0)
12313                 return;
12314
12315         if ((magic != TG3_EEPROM_MAGIC) &&
12316             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12317             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12318                 return;
12319
12320         /*
12321          * Size the chip by reading offsets at increasing powers of two.
12322          * When we encounter our validation signature, we know the addressing
12323          * has wrapped around, and thus have our chip size.
12324          */
12325         cursize = 0x10;
12326
12327         while (cursize < tp->nvram_size) {
12328                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12329                         return;
12330
12331                 if (val == magic)
12332                         break;
12333
12334                 cursize <<= 1;
12335         }
12336
12337         tp->nvram_size = cursize;
12338 }
12339
12340 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12341 {
12342         u32 val;
12343
12344         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12345                 return;
12346
12347         /* Selfboot format */
12348         if (val != TG3_EEPROM_MAGIC) {
12349                 tg3_get_eeprom_size(tp);
12350                 return;
12351         }
12352
12353         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12354                 if (val != 0) {
12355                         /* This is confusing.  We want to operate on the
12356                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12357                          * call will read from NVRAM and byteswap the data
12358                          * according to the byteswapping settings for all
12359                          * other register accesses.  This ensures the data we
12360                          * want will always reside in the lower 16-bits.
12361                          * However, the data in NVRAM is in LE format, which
12362                          * means the data from the NVRAM read will always be
12363                          * opposite the endianness of the CPU.  The 16-bit
12364                          * byteswap then brings the data to CPU endianness.
12365                          */
12366                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12367                         return;
12368                 }
12369         }
12370         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12371 }
12372
/* Decode NVRAM_CFG1 into JEDEC vendor id, page size and buffering flags.
 *
 * The per-vendor decode applies to 5750 and 5780-class parts; all other
 * ASICs get the buffered Atmel AT45DB0X1B defaults.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);
        if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
                tg3_flag_set(tp, FLASH);
        } else {
                /* No flash interface: disable compat bypass mode. */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
            tg3_flag(tp, 5780_CLASS)) {
                switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
                case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_ATMEL_EEPROM:
                        tp->nvram_jedecnum = JEDEC_ATMEL;
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_ST:
                        tp->nvram_jedecnum = JEDEC_ST;
                        tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
                        tg3_flag_set(tp, NVRAM_BUFFERED);
                        break;
                case FLASH_VENDOR_SAIFUN:
                        tp->nvram_jedecnum = JEDEC_SAIFUN;
                        tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
                        break;
                case FLASH_VENDOR_SST_SMALL:
                case FLASH_VENDOR_SST_LARGE:
                        tp->nvram_jedecnum = JEDEC_SST;
                        tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
                        break;
                }
        } else {
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
                tg3_flag_set(tp, NVRAM_BUFFERED);
        }
}
12423
12424 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12425 {
12426         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12427         case FLASH_5752PAGE_SIZE_256:
12428                 tp->nvram_pagesize = 256;
12429                 break;
12430         case FLASH_5752PAGE_SIZE_512:
12431                 tp->nvram_pagesize = 512;
12432                 break;
12433         case FLASH_5752PAGE_SIZE_1K:
12434                 tp->nvram_pagesize = 1024;
12435                 break;
12436         case FLASH_5752PAGE_SIZE_2K:
12437                 tp->nvram_pagesize = 2048;
12438                 break;
12439         case FLASH_5752PAGE_SIZE_4K:
12440                 tp->nvram_pagesize = 4096;
12441                 break;
12442         case FLASH_5752PAGE_SIZE_264:
12443                 tp->nvram_pagesize = 264;
12444                 break;
12445         case FLASH_5752PAGE_SIZE_528:
12446                 tp->nvram_pagesize = 528;
12447                 break;
12448         }
12449 }
12450
/* Decode NVRAM_CFG1 for 5752 parts: vendor id, buffering, flash vs EEPROM,
 * and page size.  Bit 27 marks TPM-protected NVRAM.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27))
                tg3_flag_set(tp, PROTECTED_NVRAM);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
        case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                break;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                break;
        }

        if (tg3_flag(tp, FLASH)) {
                tg3_nvram_get_pagesize(tp, nvcfg1);
        } else {
                /* For eeprom, set pagesize to maximum eeprom size */
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                /* EEPROM access: disable compat bypass mode. */
                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
        }
}
12491
/* Decode NVRAM_CFG1 for 5755 parts: vendor id, page size and total size.
 *
 * Bit 27 marks TPM-protected NVRAM; when set, the usable size is reduced
 * to the protected value for each part instead of the full chip size.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5755VENDOR_ATMEL_FLASH_1:
        case FLASH_5755VENDOR_ATMEL_FLASH_2:
        case FLASH_5755VENDOR_ATMEL_FLASH_3:
        case FLASH_5755VENDOR_ATMEL_FLASH_5:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 264;
                /* Size depends on the exact Atmel part and protection. */
                if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
                    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
                        tp->nvram_size = (protect ? 0x3e200 :
                                          TG3_NVRAM_SIZE_512KB);
                else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ? 0x1f200 :
                                          TG3_NVRAM_SIZE_128KB);
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                /* Size depends on the exact ST part and protection. */
                if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_128KB);
                else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_64KB :
                                          TG3_NVRAM_SIZE_256KB);
                else
                        tp->nvram_size = (protect ?
                                          TG3_NVRAM_SIZE_128KB :
                                          TG3_NVRAM_SIZE_512KB);
                break;
        }
}
12547
12548 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12549 {
12550         u32 nvcfg1;
12551
12552         nvcfg1 = tr32(NVRAM_CFG1);
12553
12554         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12555         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12556         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12557         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12558         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12559                 tp->nvram_jedecnum = JEDEC_ATMEL;
12560                 tg3_flag_set(tp, NVRAM_BUFFERED);
12561                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12562
12563                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12564                 tw32(NVRAM_CFG1, nvcfg1);
12565                 break;
12566         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12567         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12568         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12569         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12570                 tp->nvram_jedecnum = JEDEC_ATMEL;
12571                 tg3_flag_set(tp, NVRAM_BUFFERED);
12572                 tg3_flag_set(tp, FLASH);
12573                 tp->nvram_pagesize = 264;
12574                 break;
12575         case FLASH_5752VENDOR_ST_M45PE10:
12576         case FLASH_5752VENDOR_ST_M45PE20:
12577         case FLASH_5752VENDOR_ST_M45PE40:
12578                 tp->nvram_jedecnum = JEDEC_ST;
12579                 tg3_flag_set(tp, NVRAM_BUFFERED);
12580                 tg3_flag_set(tp, FLASH);
12581                 tp->nvram_pagesize = 256;
12582                 break;
12583         }
12584 }
12585
/* Probe NVRAM geometry for 5761-class devices.  The vendor and page
 * size come from the NVRAM_CFG1 strapping; the total size either comes
 * from the address-lockout register (TPM-protected parts) or is
 * inferred from the part number.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, protect = 0;

        nvcfg1 = tr32(NVRAM_CFG1);

        /* NVRAM protection for TPM */
        if (nvcfg1 & (1 << 27)) {
                tg3_flag_set(tp, PROTECTED_NVRAM);
                protect = 1;
        }

        nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
        switch (nvcfg1) {
        case FLASH_5761VENDOR_ATMEL_ADB021D:
        case FLASH_5761VENDOR_ATMEL_ADB041D:
        case FLASH_5761VENDOR_ATMEL_ADB081D:
        case FLASH_5761VENDOR_ATMEL_ADB161D:
        case FLASH_5761VENDOR_ATMEL_MDB021D:
        case FLASH_5761VENDOR_ATMEL_MDB041D:
        case FLASH_5761VENDOR_ATMEL_MDB081D:
        case FLASH_5761VENDOR_ATMEL_MDB161D:
                /* Atmel parts: no page-address translation on 5761. */
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
                tp->nvram_pagesize = 256;
                break;
        case FLASH_5761VENDOR_ST_A_M45PE20:
        case FLASH_5761VENDOR_ST_A_M45PE40:
        case FLASH_5761VENDOR_ST_A_M45PE80:
        case FLASH_5761VENDOR_ST_A_M45PE16:
        case FLASH_5761VENDOR_ST_M_M45PE20:
        case FLASH_5761VENDOR_ST_M_M45PE40:
        case FLASH_5761VENDOR_ST_M_M45PE80:
        case FLASH_5761VENDOR_ST_M_M45PE16:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);
                tp->nvram_pagesize = 256;
                break;
        }

        if (protect) {
                /* Protected parts: the usable size is whatever the
                 * bootcode programmed into the lockout register.
                 */
                tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
        } else {
                /* Unprotected: size follows directly from the part. */
                switch (nvcfg1) {
                case FLASH_5761VENDOR_ATMEL_ADB161D:
                case FLASH_5761VENDOR_ATMEL_MDB161D:
                case FLASH_5761VENDOR_ST_A_M45PE16:
                case FLASH_5761VENDOR_ST_M_M45PE16:
                        tp->nvram_size = TG3_NVRAM_SIZE_2MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB081D:
                case FLASH_5761VENDOR_ATMEL_MDB081D:
                case FLASH_5761VENDOR_ST_A_M45PE80:
                case FLASH_5761VENDOR_ST_M_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB041D:
                case FLASH_5761VENDOR_ATMEL_MDB041D:
                case FLASH_5761VENDOR_ST_A_M45PE40:
                case FLASH_5761VENDOR_ST_M_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5761VENDOR_ATMEL_ADB021D:
                case FLASH_5761VENDOR_ATMEL_MDB021D:
                case FLASH_5761VENDOR_ST_A_M45PE20:
                case FLASH_5761VENDOR_ST_M_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                }
        }
}
12660
12661 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12662 {
12663         tp->nvram_jedecnum = JEDEC_ATMEL;
12664         tg3_flag_set(tp, NVRAM_BUFFERED);
12665         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12666 }
12667
/* Probe NVRAM geometry for 57780-class devices (also dispatched for
 * 57765-class parts by tg3_nvram_init).  Sets vendor, size and page
 * size from the NVRAM_CFG1 strapping; unrecognized straps mark the
 * device as having no NVRAM.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
        case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                /* EEPROM part: page size is fixed, skip the flash
                 * page-size probe at the bottom of this function.
                 */
                return;
        case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
        case FLASH_57780VENDOR_ATMEL_AT45DB011D:
        case FLASH_57780VENDOR_ATMEL_AT45DB011B:
        case FLASH_57780VENDOR_ATMEL_AT45DB021D:
        case FLASH_57780VENDOR_ATMEL_AT45DB021B:
        case FLASH_57780VENDOR_ATMEL_AT45DB041D:
        case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Total size follows from the exact Atmel part. */
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
                case FLASH_57780VENDOR_ATMEL_AT45DB011D:
                case FLASH_57780VENDOR_ATMEL_AT45DB011B:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB021D:
                case FLASH_57780VENDOR_ATMEL_AT45DB021B:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_57780VENDOR_ATMEL_AT45DB041D:
                case FLASH_57780VENDOR_ATMEL_AT45DB041B:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        case FLASH_5752VENDOR_ST_M45PE10:
        case FLASH_5752VENDOR_ST_M45PE20:
        case FLASH_5752VENDOR_ST_M45PE40:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Total size follows from the exact ST part. */
                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5752VENDOR_ST_M45PE10:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5752VENDOR_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                }
                break;
        default:
                /* Unknown strap: treat the device as NVRAM-less. */
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* Only 264/528-byte pages use page-address translation. */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12739
12740
/* Probe NVRAM geometry for 5717-class devices (also dispatched for
 * 5719 by tg3_nvram_init).  Sets vendor, size and page size from the
 * NVRAM_CFG1 strapping; unrecognized straps mark the device as having
 * no NVRAM.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1;

        nvcfg1 = tr32(NVRAM_CFG1);

        switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
        case FLASH_5717VENDOR_ATMEL_EEPROM:
        case FLASH_5717VENDOR_MICRO_EEPROM:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                /* EEPROM part: page size is fixed, skip the flash
                 * page-size probe at the bottom of this function.
                 */
                return;
        case FLASH_5717VENDOR_ATMEL_MDB011D:
        case FLASH_5717VENDOR_ATMEL_ADB011B:
        case FLASH_5717VENDOR_ATMEL_ADB011D:
        case FLASH_5717VENDOR_ATMEL_MDB021D:
        case FLASH_5717VENDOR_ATMEL_ADB021B:
        case FLASH_5717VENDOR_ATMEL_ADB021D:
        case FLASH_5717VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ATMEL_MDB021D:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ATMEL_ADB021B:
                case FLASH_5717VENDOR_ATMEL_ADB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5717VENDOR_ST_M_M25PE10:
        case FLASH_5717VENDOR_ST_A_M25PE10:
        case FLASH_5717VENDOR_ST_M_M45PE10:
        case FLASH_5717VENDOR_ST_A_M45PE10:
        case FLASH_5717VENDOR_ST_M_M25PE20:
        case FLASH_5717VENDOR_ST_A_M25PE20:
        case FLASH_5717VENDOR_ST_M_M45PE20:
        case FLASH_5717VENDOR_ST_A_M45PE20:
        case FLASH_5717VENDOR_ST_25USPT:
        case FLASH_5717VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
                case FLASH_5717VENDOR_ST_M_M25PE20:
                case FLASH_5717VENDOR_ST_M_M45PE20:
                        /* Detect size with tg3_nvram_get_size() */
                        break;
                case FLASH_5717VENDOR_ST_A_M25PE20:
                case FLASH_5717VENDOR_ST_A_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                /* Unknown strap: treat the device as NVRAM-less. */
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* Only 264/528-byte pages use page-address translation. */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12818
/* Probe NVRAM geometry for 5720-class devices.  The pin-strap/vendor
 * field of NVRAM_CFG1 selects the attached part; unrecognized straps
 * mark the device as having no NVRAM.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
        u32 nvcfg1, nvmpinstrp;

        nvcfg1 = tr32(NVRAM_CFG1);
        /* Pin-strap value identifying the NVRAM part. */
        nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

        switch (nvmpinstrp) {
        case FLASH_5720_EEPROM_HD:
        case FLASH_5720_EEPROM_LD:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);

                nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
                tw32(NVRAM_CFG1, nvcfg1);
                /* High-density vs low-density EEPROM page size; fixed,
                 * so skip the flash page-size probe below.
                 */
                if (nvmpinstrp == FLASH_5720_EEPROM_HD)
                        tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
                else
                        tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
                return;
        case FLASH_5720VENDOR_M_ATMEL_DB011D:
        case FLASH_5720VENDOR_A_ATMEL_DB011B:
        case FLASH_5720VENDOR_A_ATMEL_DB011D:
        case FLASH_5720VENDOR_M_ATMEL_DB021D:
        case FLASH_5720VENDOR_A_ATMEL_DB021B:
        case FLASH_5720VENDOR_A_ATMEL_DB021D:
        case FLASH_5720VENDOR_M_ATMEL_DB041D:
        case FLASH_5720VENDOR_A_ATMEL_DB041B:
        case FLASH_5720VENDOR_A_ATMEL_DB041D:
        case FLASH_5720VENDOR_M_ATMEL_DB081D:
        case FLASH_5720VENDOR_A_ATMEL_DB081D:
        case FLASH_5720VENDOR_ATMEL_45USPT:
                tp->nvram_jedecnum = JEDEC_ATMEL;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Total size follows from the exact Atmel part. */
                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ATMEL_DB021D:
                case FLASH_5720VENDOR_A_ATMEL_DB021B:
                case FLASH_5720VENDOR_A_ATMEL_DB021D:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB041D:
                case FLASH_5720VENDOR_A_ATMEL_DB041B:
                case FLASH_5720VENDOR_A_ATMEL_DB041D:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ATMEL_DB081D:
                case FLASH_5720VENDOR_A_ATMEL_DB081D:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        case FLASH_5720VENDOR_M_ST_M25PE10:
        case FLASH_5720VENDOR_M_ST_M45PE10:
        case FLASH_5720VENDOR_A_ST_M25PE10:
        case FLASH_5720VENDOR_A_ST_M45PE10:
        case FLASH_5720VENDOR_M_ST_M25PE20:
        case FLASH_5720VENDOR_M_ST_M45PE20:
        case FLASH_5720VENDOR_A_ST_M25PE20:
        case FLASH_5720VENDOR_A_ST_M45PE20:
        case FLASH_5720VENDOR_M_ST_M25PE40:
        case FLASH_5720VENDOR_M_ST_M45PE40:
        case FLASH_5720VENDOR_A_ST_M25PE40:
        case FLASH_5720VENDOR_A_ST_M45PE40:
        case FLASH_5720VENDOR_M_ST_M25PE80:
        case FLASH_5720VENDOR_M_ST_M45PE80:
        case FLASH_5720VENDOR_A_ST_M25PE80:
        case FLASH_5720VENDOR_A_ST_M45PE80:
        case FLASH_5720VENDOR_ST_25USPT:
        case FLASH_5720VENDOR_ST_45USPT:
                tp->nvram_jedecnum = JEDEC_ST;
                tg3_flag_set(tp, NVRAM_BUFFERED);
                tg3_flag_set(tp, FLASH);

                /* Total size follows from the exact ST part. */
                switch (nvmpinstrp) {
                case FLASH_5720VENDOR_M_ST_M25PE20:
                case FLASH_5720VENDOR_M_ST_M45PE20:
                case FLASH_5720VENDOR_A_ST_M25PE20:
                case FLASH_5720VENDOR_A_ST_M45PE20:
                        tp->nvram_size = TG3_NVRAM_SIZE_256KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE40:
                case FLASH_5720VENDOR_M_ST_M45PE40:
                case FLASH_5720VENDOR_A_ST_M25PE40:
                case FLASH_5720VENDOR_A_ST_M45PE40:
                        tp->nvram_size = TG3_NVRAM_SIZE_512KB;
                        break;
                case FLASH_5720VENDOR_M_ST_M25PE80:
                case FLASH_5720VENDOR_M_ST_M45PE80:
                case FLASH_5720VENDOR_A_ST_M25PE80:
                case FLASH_5720VENDOR_A_ST_M45PE80:
                        tp->nvram_size = TG3_NVRAM_SIZE_1MB;
                        break;
                default:
                        tp->nvram_size = TG3_NVRAM_SIZE_128KB;
                        break;
                }
                break;
        default:
                /* Unknown strap: treat the device as NVRAM-less. */
                tg3_flag_set(tp, NO_NVRAM);
                return;
        }

        tg3_nvram_get_pagesize(tp, nvcfg1);
        /* Only 264/528-byte pages use page-address translation. */
        if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
                tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12930
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
        /* Reset the EEPROM access state machine and program the
         * default clock period.
         */
        tw32_f(GRC_EEPROM_ADDR,
             (EEPROM_ADDR_FSM_RESET |
              (EEPROM_DEFAULT_CLOCK_PERIOD <<
               EEPROM_ADDR_CLKPERD_SHIFT)));

        msleep(1);

        /* Enable seeprom accesses. */
        tw32_f(GRC_LOCAL_CTRL,
             tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
        udelay(100);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
                tg3_flag_set(tp, NVRAM);

                /* NVRAM is shared with firmware; must hold the hardware
                 * arbitration lock while probing.
                 */
                if (tg3_nvram_lock(tp)) {
                        netdev_warn(tp->dev,
                                    "Cannot get nvram lock, %s failed\n",
                                    __func__);
                        return;
                }
                tg3_enable_nvram_access(tp);

                tp->nvram_size = 0;

                /* Dispatch to the ASIC-specific probe routine; each one
                 * fills in vendor/page size and, where possible, size.
                 */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
                        tg3_get_5752_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
                        tg3_get_5755_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                        tg3_get_5787_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        tg3_get_5761_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                        tg3_get_5906_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
                         tg3_flag(tp, 57765_CLASS))
                        tg3_get_57780_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
                        tg3_get_5717_nvram_info(tp);
                else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
                        tg3_get_5720_nvram_info(tp);
                else
                        tg3_get_nvram_info(tp);

                /* Fall back to probing if the vendor-specific routine
                 * could not determine the size.
                 */
                if (tp->nvram_size == 0)
                        tg3_get_nvram_size(tp);

                tg3_disable_nvram_access(tp);
                tg3_nvram_unlock(tp);

        } else {
                /* 5700/5701: no NVRAM interface; read the EEPROM size
                 * directly instead.
                 */
                tg3_flag_clear(tp, NVRAM);
                tg3_flag_clear(tp, NVRAM_BUFFERED);

                tg3_get_eeprom_size(tp);
        }
}
12996
/* Maps a PCI subsystem (vendor, device) pair to the PHY ID known to be
 * present on that board.
 */
struct subsys_tbl_ent {
        u16 subsys_vendor, subsys_devid;        /* PCI subsystem IDs */
        u32 phy_id;     /* TG3_PHY_ID_*; 0 when no specific PHY applies */
};
13001
/* Known boards, keyed by PCI subsystem vendor/device; searched by
 * tg3_lookup_by_subsys().
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
        /* Broadcom boards. */
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
        { TG3PCI_SUBVENDOR_ID_BROADCOM,
          TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

        /* 3com boards. */
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_3COM,
          TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

        /* DELL boards. */
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
        { TG3PCI_SUBVENDOR_ID_DELL,
          TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

        /* Compaq boards. */
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
        { TG3PCI_SUBVENDOR_ID_COMPAQ,
          TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

        /* IBM boards. */
        { TG3PCI_SUBVENDOR_ID_IBM,
          TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
13065
13066 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13067 {
13068         int i;
13069
13070         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13071                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13072                      tp->pdev->subsystem_vendor) &&
13073                     (subsys_id_to_phy_id[i].subsys_devid ==
13074                      tp->pdev->subsystem_device))
13075                         return &subsys_id_to_phy_id[i];
13076         }
13077         return NULL;
13078 }
13079
13080 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13081 {
13082         u32 val;
13083
13084         tp->phy_id = TG3_PHY_ID_INVALID;
13085         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13086
13087         /* Assume an onboard device and WOL capable by default.  */
13088         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13089         tg3_flag_set(tp, WOL_CAP);
13090
13091         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13092                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13093                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13094                         tg3_flag_set(tp, IS_NIC);
13095                 }
13096                 val = tr32(VCPU_CFGSHDW);
13097                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13098                         tg3_flag_set(tp, ASPM_WORKAROUND);
13099                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13100                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13101                         tg3_flag_set(tp, WOL_ENABLE);
13102                         device_set_wakeup_enable(&tp->pdev->dev, true);
13103                 }
13104                 goto done;
13105         }
13106
13107         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13108         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13109                 u32 nic_cfg, led_cfg;
13110                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13111                 int eeprom_phy_serdes = 0;
13112
13113                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13114                 tp->nic_sram_data_cfg = nic_cfg;
13115
13116                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13117                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13118                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13119                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13120                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13121                     (ver > 0) && (ver < 0x100))
13122                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13123
13124                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13125                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13126
13127                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13128                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13129                         eeprom_phy_serdes = 1;
13130
13131                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13132                 if (nic_phy_id != 0) {
13133                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13134                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13135
13136                         eeprom_phy_id  = (id1 >> 16) << 10;
13137                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13138                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13139                 } else
13140                         eeprom_phy_id = 0;
13141
13142                 tp->phy_id = eeprom_phy_id;
13143                 if (eeprom_phy_serdes) {
13144                         if (!tg3_flag(tp, 5705_PLUS))
13145                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13146                         else
13147                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13148                 }
13149
13150                 if (tg3_flag(tp, 5750_PLUS))
13151                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13152                                     SHASTA_EXT_LED_MODE_MASK);
13153                 else
13154                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13155
13156                 switch (led_cfg) {
13157                 default:
13158                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13159                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13160                         break;
13161
13162                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13163                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13164                         break;
13165
13166                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13167                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13168
13169                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13170                          * read on some older 5700/5701 bootcode.
13171                          */
13172                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13173                             ASIC_REV_5700 ||
13174                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13175                             ASIC_REV_5701)
13176                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13177
13178                         break;
13179
13180                 case SHASTA_EXT_LED_SHARED:
13181                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13182                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13183                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13184                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13185                                                  LED_CTRL_MODE_PHY_2);
13186                         break;
13187
13188                 case SHASTA_EXT_LED_MAC:
13189                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13190                         break;
13191
13192                 case SHASTA_EXT_LED_COMBO:
13193                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13194                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13195                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13196                                                  LED_CTRL_MODE_PHY_2);
13197                         break;
13198
13199                 }
13200
13201                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13202                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13203                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13204                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13205
13206                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13207                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13208
13209                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13210                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13211                         if ((tp->pdev->subsystem_vendor ==
13212                              PCI_VENDOR_ID_ARIMA) &&
13213                             (tp->pdev->subsystem_device == 0x205a ||
13214                              tp->pdev->subsystem_device == 0x2063))
13215                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13216                 } else {
13217                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13218                         tg3_flag_set(tp, IS_NIC);
13219                 }
13220
13221                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13222                         tg3_flag_set(tp, ENABLE_ASF);
13223                         if (tg3_flag(tp, 5750_PLUS))
13224                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13225                 }
13226
13227                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13228                     tg3_flag(tp, 5750_PLUS))
13229                         tg3_flag_set(tp, ENABLE_APE);
13230
13231                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13232                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13233                         tg3_flag_clear(tp, WOL_CAP);
13234
13235                 if (tg3_flag(tp, WOL_CAP) &&
13236                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13237                         tg3_flag_set(tp, WOL_ENABLE);
13238                         device_set_wakeup_enable(&tp->pdev->dev, true);
13239                 }
13240
13241                 if (cfg2 & (1 << 17))
13242                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13243
13244                 /* serdes signal pre-emphasis in register 0x590 set by */
13245                 /* bootcode if bit 18 is set */
13246                 if (cfg2 & (1 << 18))
13247                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13248
13249                 if ((tg3_flag(tp, 57765_PLUS) ||
13250                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13251                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13252                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13253                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13254
13255                 if (tg3_flag(tp, PCI_EXPRESS) &&
13256                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13257                     !tg3_flag(tp, 57765_PLUS)) {
13258                         u32 cfg3;
13259
13260                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13261                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13262                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13263                 }
13264
13265                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13266                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13267                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13268                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13269                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13270                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13271         }
13272 done:
13273         if (tg3_flag(tp, WOL_CAP))
13274                 device_set_wakeup_enable(&tp->pdev->dev,
13275                                          tg3_flag(tp, WOL_ENABLE));
13276         else
13277                 device_set_wakeup_capable(&tp->pdev->dev, false);
13278 }
13279
13280 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13281 {
13282         int i;
13283         u32 val;
13284
13285         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13286         tw32(OTP_CTRL, cmd);
13287
13288         /* Wait for up to 1 ms for command to execute. */
13289         for (i = 0; i < 100; i++) {
13290                 val = tr32(OTP_STATUS);
13291                 if (val & OTP_STATUS_CMD_DONE)
13292                         break;
13293                 udelay(10);
13294         }
13295
13296         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13297 }
13298
13299 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13300  * configuration is a 32-bit value that straddles the alignment boundary.
13301  * We do two 32-bit reads and then shift and merge the results.
13302  */
13303 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13304 {
13305         u32 bhalf_otp, thalf_otp;
13306
13307         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13308
13309         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13310                 return 0;
13311
13312         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13313
13314         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13315                 return 0;
13316
13317         thalf_otp = tr32(OTP_READ_DATA);
13318
13319         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13320
13321         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13322                 return 0;
13323
13324         bhalf_otp = tr32(OTP_READ_DATA);
13325
13326         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13327 }
13328
13329 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13330 {
13331         u32 adv = ADVERTISED_Autoneg;
13332
13333         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13334                 adv |= ADVERTISED_1000baseT_Half |
13335                        ADVERTISED_1000baseT_Full;
13336
13337         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13338                 adv |= ADVERTISED_100baseT_Half |
13339                        ADVERTISED_100baseT_Full |
13340                        ADVERTISED_10baseT_Half |
13341                        ADVERTISED_10baseT_Full |
13342                        ADVERTISED_TP;
13343         else
13344                 adv |= ADVERTISED_FIBRE;
13345
13346         tp->link_config.advertising = adv;
13347         tp->link_config.speed = SPEED_INVALID;
13348         tp->link_config.duplex = DUPLEX_INVALID;
13349         tp->link_config.autoneg = AUTONEG_ENABLE;
13350         tp->link_config.active_speed = SPEED_INVALID;
13351         tp->link_config.active_duplex = DUPLEX_INVALID;
13352         tp->link_config.orig_speed = SPEED_INVALID;
13353         tp->link_config.orig_duplex = DUPLEX_INVALID;
13354         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13355 }
13356
/* Probe the on-chip PHY: establish tp->phy_id and the serdes/EEE
 * phy_flags, set default flow-control behavior, and (for copper PHYs
 * not owned by management firmware) reset and restart autonegotiation.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	/* When phylib manages this PHY, delegate the entire probe. */
	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Pack the two MII ID registers into the driver's
		 * internal TG3_PHY_ID_* layout.
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		/* BCM8002 is the one known serdes PHY ID. */
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark the copper PHYs on these ASIC revs (excluding early A0
	 * steppings of 5718/57765) as Energy Efficient Ethernet capable.
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	/* Only touch the PHY when no management firmware owns it. */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR link status is latched-low; read it twice so the
		 * second read reflects the current link state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* If the advertisement registers don't already match the
		 * desired config, rewrite them and restart autoneg.
		 */
		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* NOTE(review): the DSP init is deliberately run twice,
		 * with the second attempt's status reported — presumably
		 * a bootcode/DSP latching quirk; confirm before changing.
		 */
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
13467
13468 static void __devinit tg3_read_vpd(struct tg3 *tp)
13469 {
13470         u8 *vpd_data;
13471         unsigned int block_end, rosize, len;
13472         u32 vpdlen;
13473         int j, i = 0;
13474
13475         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13476         if (!vpd_data)
13477                 goto out_no_vpd;
13478
13479         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13480         if (i < 0)
13481                 goto out_not_found;
13482
13483         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13484         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13485         i += PCI_VPD_LRDT_TAG_SIZE;
13486
13487         if (block_end > vpdlen)
13488                 goto out_not_found;
13489
13490         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13491                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13492         if (j > 0) {
13493                 len = pci_vpd_info_field_size(&vpd_data[j]);
13494
13495                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13496                 if (j + len > block_end || len != 4 ||
13497                     memcmp(&vpd_data[j], "1028", 4))
13498                         goto partno;
13499
13500                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13501                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13502                 if (j < 0)
13503                         goto partno;
13504
13505                 len = pci_vpd_info_field_size(&vpd_data[j]);
13506
13507                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13508                 if (j + len > block_end)
13509                         goto partno;
13510
13511                 memcpy(tp->fw_ver, &vpd_data[j], len);
13512                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13513         }
13514
13515 partno:
13516         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13517                                       PCI_VPD_RO_KEYWORD_PARTNO);
13518         if (i < 0)
13519                 goto out_not_found;
13520
13521         len = pci_vpd_info_field_size(&vpd_data[i]);
13522
13523         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13524         if (len > TG3_BPN_SIZE ||
13525             (len + i) > vpdlen)
13526                 goto out_not_found;
13527
13528         memcpy(tp->board_part_number, &vpd_data[i], len);
13529
13530 out_not_found:
13531         kfree(vpd_data);
13532         if (tp->board_part_number[0])
13533                 return;
13534
13535 out_no_vpd:
13536         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13537                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13538                         strcpy(tp->board_part_number, "BCM5717");
13539                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13540                         strcpy(tp->board_part_number, "BCM5718");
13541                 else
13542                         goto nomatch;
13543         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13544                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13545                         strcpy(tp->board_part_number, "BCM57780");
13546                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13547                         strcpy(tp->board_part_number, "BCM57760");
13548                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13549                         strcpy(tp->board_part_number, "BCM57790");
13550                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13551                         strcpy(tp->board_part_number, "BCM57788");
13552                 else
13553                         goto nomatch;
13554         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13555                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13556                         strcpy(tp->board_part_number, "BCM57761");
13557                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13558                         strcpy(tp->board_part_number, "BCM57765");
13559                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13560                         strcpy(tp->board_part_number, "BCM57781");
13561                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13562                         strcpy(tp->board_part_number, "BCM57785");
13563                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13564                         strcpy(tp->board_part_number, "BCM57791");
13565                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13566                         strcpy(tp->board_part_number, "BCM57795");
13567                 else
13568                         goto nomatch;
13569         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13570                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13571                         strcpy(tp->board_part_number, "BCM57762");
13572                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13573                         strcpy(tp->board_part_number, "BCM57766");
13574                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13575                         strcpy(tp->board_part_number, "BCM57782");
13576                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13577                         strcpy(tp->board_part_number, "BCM57786");
13578                 else
13579                         goto nomatch;
13580         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13581                 strcpy(tp->board_part_number, "BCM95906");
13582         } else {
13583 nomatch:
13584                 strcpy(tp->board_part_number, "none");
13585         }
13586 }
13587
13588 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13589 {
13590         u32 val;
13591
13592         if (tg3_nvram_read(tp, offset, &val) ||
13593             (val & 0xfc000000) != 0x0c000000 ||
13594             tg3_nvram_read(tp, offset + 4, &val) ||
13595             val != 0)
13596                 return 0;
13597
13598         return 1;
13599 }
13600
/* Read the bootcode version out of NVRAM and append it to tp->fw_ver.
 * Newer images embed a 16-byte version string reachable through a
 * pointer in the image header; older images only provide a packed
 * major/minor word.
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Per the uses below: word 0xc = image offset, 0x4 = load addr. */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* New-style header: 0x0cxxxxxx magic followed by a zero word
	 * (the same signature tg3_fw_img_is_valid() checks).
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Append after whatever (e.g. VPD-derived) prefix is present. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need 16 bytes of room for the embedded version string. */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		/* ver_offset is relative to the image load address. */
		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		/* Legacy layout: packed major/minor in the pointer table. */
		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
13652
13653 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13654 {
13655         u32 val, major, minor;
13656
13657         /* Use native endian representation */
13658         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13659                 return;
13660
13661         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13662                 TG3_NVM_HWSB_CFG1_MAJSFT;
13663         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13664                 TG3_NVM_HWSB_CFG1_MINSFT;
13665
13666         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13667 }
13668
13669 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13670 {
13671         u32 offset, major, minor, build;
13672
13673         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13674
13675         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13676                 return;
13677
13678         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13679         case TG3_EEPROM_SB_REVISION_0:
13680                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13681                 break;
13682         case TG3_EEPROM_SB_REVISION_2:
13683                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13684                 break;
13685         case TG3_EEPROM_SB_REVISION_3:
13686                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13687                 break;
13688         case TG3_EEPROM_SB_REVISION_4:
13689                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13690                 break;
13691         case TG3_EEPROM_SB_REVISION_5:
13692                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13693                 break;
13694         case TG3_EEPROM_SB_REVISION_6:
13695                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13696                 break;
13697         default:
13698                 return;
13699         }
13700
13701         if (tg3_nvram_read(tp, offset, &val))
13702                 return;
13703
13704         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13705                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13706         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13707                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13708         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13709
13710         if (minor > 99 || build > 26)
13711                 return;
13712
13713         offset = strlen(tp->fw_ver);
13714         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13715                  " v%d.%02d", major, minor);
13716
13717         if (build > 0) {
13718                 offset = strlen(tp->fw_ver);
13719                 if (offset < TG3_VER_SIZE - 1)
13720                         tp->fw_ver[offset] = 'a' + build - 1;
13721         }
13722 }
13723
/* Locate the ASF management firmware image through the NVRAM directory
 * and append its version string (", xxxx") to tp->fw_ver.
 */
static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	/* Scan the NVRAM directory for the ASF init-code entry. */
	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	/* Fell off the end of the directory: no ASF entry present. */
	if (offset == TG3_NVM_DIR_END)
		return;

	/* Image load address: fixed on pre-5705 parts, otherwise read
	 * from the word preceding the directory entry.
	 */
	if (!tg3_flag(tp, 5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	/* Follow the entry to the image, validate its header, then read
	 * the load-address-relative pointer to the version string.
	 */
	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	/* Copy up to 16 version bytes, truncating at the buffer end. */
	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		/* Partial copy of whatever room remains, then stop. */
		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}
13775
13776 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13777 {
13778         int vlen;
13779         u32 apedata;
13780         char *fwtype;
13781
13782         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13783                 return;
13784
13785         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13786         if (apedata != APE_SEG_SIG_MAGIC)
13787                 return;
13788
13789         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13790         if (!(apedata & APE_FW_STATUS_READY))
13791                 return;
13792
13793         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13794
13795         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13796                 tg3_flag_set(tp, APE_HAS_NCSI);
13797                 fwtype = "NCSI";
13798         } else {
13799                 fwtype = "DASH";
13800         }
13801
13802         vlen = strlen(tp->fw_ver);
13803
13804         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13805                  fwtype,
13806                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13807                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13808                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13809                  (apedata & APE_FW_VERSION_BLDMSK));
13810 }
13811
13812 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13813 {
13814         u32 val;
13815         bool vpd_vers = false;
13816
13817         if (tp->fw_ver[0] != 0)
13818                 vpd_vers = true;
13819
13820         if (tg3_flag(tp, NO_NVRAM)) {
13821                 strcat(tp->fw_ver, "sb");
13822                 return;
13823         }
13824
13825         if (tg3_nvram_read(tp, 0, &val))
13826                 return;
13827
13828         if (val == TG3_EEPROM_MAGIC)
13829                 tg3_read_bc_ver(tp);
13830         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13831                 tg3_read_sb_ver(tp, val);
13832         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13833                 tg3_read_hwsb_ver(tp);
13834         else
13835                 return;
13836
13837         if (vpd_vers)
13838                 goto done;
13839
13840         if (tg3_flag(tp, ENABLE_APE)) {
13841                 if (tg3_flag(tp, ENABLE_ASF))
13842                         tg3_read_dash_ver(tp);
13843         } else if (tg3_flag(tp, ENABLE_ASF)) {
13844                 tg3_read_mgmtfw_ver(tp);
13845         }
13846
13847 done:
13848         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13849 }
13850
13851 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13852 {
13853         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13854                 return TG3_RX_RET_MAX_SIZE_5717;
13855         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13856                 return TG3_RX_RET_MAX_SIZE_5700;
13857         else
13858                 return TG3_RX_RET_MAX_SIZE_5705;
13859 }
13860
/* Host bridges behind which posted memory writes may be reordered.
 * NOTE(review): presumably consulted during invariant detection to
 * enable the driver's write-reorder workaround — confirm against the
 * caller (outside this view).
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
13867
/* Find the PCI function that is the other port of a dual-port chip
 * (e.g. 5704).  Returns tp->pdev itself when configured single-port.
 * The returned pointer is deliberately NOT reference-counted — see the
 * comment before the final pci_dev_put().
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	/* Walk all eight functions of our own slot looking for a device
	 * other than ourselves; drop the ref on every non-match
	 * (pci_dev_put(NULL) is a no-op).
	 */
	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
13895
13896 static int __devinit tg3_get_invariants(struct tg3 *tp)
13897 {
13898         u32 misc_ctrl_reg;
13899         u32 pci_state_reg, grc_misc_cfg;
13900         u32 val;
13901         u16 pci_cmd;
13902         int err;
13903
13904         /* Force memory write invalidate off.  If we leave it on,
13905          * then on 5700_BX chips we have to enable a workaround.
13906          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13907          * to match the cacheline size.  The Broadcom driver have this
13908          * workaround but turns MWI off all the times so never uses
13909          * it.  This seems to suggest that the workaround is insufficient.
13910          */
13911         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13912         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13913         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13914
13915         /* Important! -- Make sure register accesses are byteswapped
13916          * correctly.  Also, for those chips that require it, make
13917          * sure that indirect register accesses are enabled before
13918          * the first operation.
13919          */
13920         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13921                               &misc_ctrl_reg);
13922         tp->misc_host_ctrl |= (misc_ctrl_reg &
13923                                MISC_HOST_CTRL_CHIPREV);
13924         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13925                                tp->misc_host_ctrl);
13926
13927         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13928                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13929         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13930                 u32 prod_id_asic_rev;
13931
13932                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13933                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13934                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13935                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13936                         pci_read_config_dword(tp->pdev,
13937                                               TG3PCI_GEN2_PRODID_ASICREV,
13938                                               &prod_id_asic_rev);
13939                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13940                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13941                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13942                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13943                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13944                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13945                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13946                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13947                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13948                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13949                         pci_read_config_dword(tp->pdev,
13950                                               TG3PCI_GEN15_PRODID_ASICREV,
13951                                               &prod_id_asic_rev);
13952                 else
13953                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13954                                               &prod_id_asic_rev);
13955
13956                 tp->pci_chip_rev_id = prod_id_asic_rev;
13957         }
13958
13959         /* Wrong chip ID in 5752 A0. This code can be removed later
13960          * as A0 is not in production.
13961          */
13962         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13963                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13964
13965         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13966          * we need to disable memory and use config. cycles
13967          * only to access all registers. The 5702/03 chips
13968          * can mistakenly decode the special cycles from the
13969          * ICH chipsets as memory write cycles, causing corruption
13970          * of register and memory space. Only certain ICH bridges
13971          * will drive special cycles with non-zero data during the
13972          * address phase which can fall within the 5703's address
13973          * range. This is not an ICH bug as the PCI spec allows
13974          * non-zero address during special cycles. However, only
13975          * these ICH bridges are known to drive non-zero addresses
13976          * during special cycles.
13977          *
13978          * Since special cycles do not cross PCI bridges, we only
13979          * enable this workaround if the 5703 is on the secondary
13980          * bus of these ICH bridges.
13981          */
13982         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13983             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13984                 static struct tg3_dev_id {
13985                         u32     vendor;
13986                         u32     device;
13987                         u32     rev;
13988                 } ich_chipsets[] = {
13989                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13990                           PCI_ANY_ID },
13991                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13992                           PCI_ANY_ID },
13993                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13994                           0xa },
13995                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13996                           PCI_ANY_ID },
13997                         { },
13998                 };
13999                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14000                 struct pci_dev *bridge = NULL;
14001
14002                 while (pci_id->vendor != 0) {
14003                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14004                                                 bridge);
14005                         if (!bridge) {
14006                                 pci_id++;
14007                                 continue;
14008                         }
14009                         if (pci_id->rev != PCI_ANY_ID) {
14010                                 if (bridge->revision > pci_id->rev)
14011                                         continue;
14012                         }
14013                         if (bridge->subordinate &&
14014                             (bridge->subordinate->number ==
14015                              tp->pdev->bus->number)) {
14016                                 tg3_flag_set(tp, ICH_WORKAROUND);
14017                                 pci_dev_put(bridge);
14018                                 break;
14019                         }
14020                 }
14021         }
14022
14023         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14024                 static struct tg3_dev_id {
14025                         u32     vendor;
14026                         u32     device;
14027                 } bridge_chipsets[] = {
14028                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14029                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14030                         { },
14031                 };
14032                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14033                 struct pci_dev *bridge = NULL;
14034
14035                 while (pci_id->vendor != 0) {
14036                         bridge = pci_get_device(pci_id->vendor,
14037                                                 pci_id->device,
14038                                                 bridge);
14039                         if (!bridge) {
14040                                 pci_id++;
14041                                 continue;
14042                         }
14043                         if (bridge->subordinate &&
14044                             (bridge->subordinate->number <=
14045                              tp->pdev->bus->number) &&
14046                             (bridge->subordinate->subordinate >=
14047                              tp->pdev->bus->number)) {
14048                                 tg3_flag_set(tp, 5701_DMA_BUG);
14049                                 pci_dev_put(bridge);
14050                                 break;
14051                         }
14052                 }
14053         }
14054
14055         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14056          * DMA addresses > 40-bit. This bridge may have other additional
14057          * 57xx devices behind it in some 4-port NIC designs for example.
14058          * Any tg3 device found behind the bridge will also need the 40-bit
14059          * DMA workaround.
14060          */
14061         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14062             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14063                 tg3_flag_set(tp, 5780_CLASS);
14064                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14065                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14066         } else {
14067                 struct pci_dev *bridge = NULL;
14068
14069                 do {
14070                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14071                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14072                                                 bridge);
14073                         if (bridge && bridge->subordinate &&
14074                             (bridge->subordinate->number <=
14075                              tp->pdev->bus->number) &&
14076                             (bridge->subordinate->subordinate >=
14077                              tp->pdev->bus->number)) {
14078                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14079                                 pci_dev_put(bridge);
14080                                 break;
14081                         }
14082                 } while (bridge);
14083         }
14084
14085         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14086             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14087                 tp->pdev_peer = tg3_find_peer(tp);
14088
14089         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14090             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14091             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14092                 tg3_flag_set(tp, 5717_PLUS);
14093
14094         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14095             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14096                 tg3_flag_set(tp, 57765_CLASS);
14097
14098         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14099                 tg3_flag_set(tp, 57765_PLUS);
14100
14101         /* Intentionally exclude ASIC_REV_5906 */
14102         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14103             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14104             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14105             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14106             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14107             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14108             tg3_flag(tp, 57765_PLUS))
14109                 tg3_flag_set(tp, 5755_PLUS);
14110
14111         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14112             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14113             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14114             tg3_flag(tp, 5755_PLUS) ||
14115             tg3_flag(tp, 5780_CLASS))
14116                 tg3_flag_set(tp, 5750_PLUS);
14117
14118         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14119             tg3_flag(tp, 5750_PLUS))
14120                 tg3_flag_set(tp, 5705_PLUS);
14121
14122         /* Determine TSO capabilities */
14123         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14124                 ; /* Do nothing. HW bug. */
14125         else if (tg3_flag(tp, 57765_PLUS))
14126                 tg3_flag_set(tp, HW_TSO_3);
14127         else if (tg3_flag(tp, 5755_PLUS) ||
14128                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14129                 tg3_flag_set(tp, HW_TSO_2);
14130         else if (tg3_flag(tp, 5750_PLUS)) {
14131                 tg3_flag_set(tp, HW_TSO_1);
14132                 tg3_flag_set(tp, TSO_BUG);
14133                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14134                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14135                         tg3_flag_clear(tp, TSO_BUG);
14136         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14137                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14138                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14139                         tg3_flag_set(tp, TSO_BUG);
14140                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14141                         tp->fw_needed = FIRMWARE_TG3TSO5;
14142                 else
14143                         tp->fw_needed = FIRMWARE_TG3TSO;
14144         }
14145
14146         /* Selectively allow TSO based on operating conditions */
14147         if (tg3_flag(tp, HW_TSO_1) ||
14148             tg3_flag(tp, HW_TSO_2) ||
14149             tg3_flag(tp, HW_TSO_3) ||
14150             tp->fw_needed) {
14151                 /* For firmware TSO, assume ASF is disabled.
14152                  * We'll disable TSO later if we discover ASF
14153                  * is enabled in tg3_get_eeprom_hw_cfg().
14154                  */
14155                 tg3_flag_set(tp, TSO_CAPABLE);
14156         } else {
14157                 tg3_flag_clear(tp, TSO_CAPABLE);
14158                 tg3_flag_clear(tp, TSO_BUG);
14159                 tp->fw_needed = NULL;
14160         }
14161
14162         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14163                 tp->fw_needed = FIRMWARE_TG3;
14164
14165         tp->irq_max = 1;
14166
14167         if (tg3_flag(tp, 5750_PLUS)) {
14168                 tg3_flag_set(tp, SUPPORT_MSI);
14169                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14170                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14171                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14172                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14173                      tp->pdev_peer == tp->pdev))
14174                         tg3_flag_clear(tp, SUPPORT_MSI);
14175
14176                 if (tg3_flag(tp, 5755_PLUS) ||
14177                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14178                         tg3_flag_set(tp, 1SHOT_MSI);
14179                 }
14180
14181                 if (tg3_flag(tp, 57765_PLUS)) {
14182                         tg3_flag_set(tp, SUPPORT_MSIX);
14183                         tp->irq_max = TG3_IRQ_MAX_VECS;
14184                         tg3_rss_init_dflt_indir_tbl(tp);
14185                 }
14186         }
14187
14188         if (tg3_flag(tp, 5755_PLUS))
14189                 tg3_flag_set(tp, SHORT_DMA_BUG);
14190
14191         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14192                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14193         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14194                 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
14195
14196         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14197             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14198             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14199                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14200
14201         if (tg3_flag(tp, 57765_PLUS) &&
14202             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14203                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14204
14205         if (!tg3_flag(tp, 5705_PLUS) ||
14206             tg3_flag(tp, 5780_CLASS) ||
14207             tg3_flag(tp, USE_JUMBO_BDFLAG))
14208                 tg3_flag_set(tp, JUMBO_CAPABLE);
14209
14210         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14211                               &pci_state_reg);
14212
14213         if (pci_is_pcie(tp->pdev)) {
14214                 u16 lnkctl;
14215
14216                 tg3_flag_set(tp, PCI_EXPRESS);
14217
14218                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14219                         int readrq = pcie_get_readrq(tp->pdev);
14220                         if (readrq > 2048)
14221                                 pcie_set_readrq(tp->pdev, 2048);
14222                 }
14223
14224                 pci_read_config_word(tp->pdev,
14225                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14226                                      &lnkctl);
14227                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14228                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14229                             ASIC_REV_5906) {
14230                                 tg3_flag_clear(tp, HW_TSO_2);
14231                                 tg3_flag_clear(tp, TSO_CAPABLE);
14232                         }
14233                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14234                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14235                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14236                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14237                                 tg3_flag_set(tp, CLKREQ_BUG);
14238                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14239                         tg3_flag_set(tp, L1PLLPD_EN);
14240                 }
14241         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14242                 /* BCM5785 devices are effectively PCIe devices, and should
14243                  * follow PCIe codepaths, but do not have a PCIe capabilities
14244                  * section.
14245                  */
14246                 tg3_flag_set(tp, PCI_EXPRESS);
14247         } else if (!tg3_flag(tp, 5705_PLUS) ||
14248                    tg3_flag(tp, 5780_CLASS)) {
14249                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14250                 if (!tp->pcix_cap) {
14251                         dev_err(&tp->pdev->dev,
14252                                 "Cannot find PCI-X capability, aborting\n");
14253                         return -EIO;
14254                 }
14255
14256                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14257                         tg3_flag_set(tp, PCIX_MODE);
14258         }
14259
14260         /* If we have an AMD 762 or VIA K8T800 chipset, write
14261          * reordering to the mailbox registers done by the host
14262          * controller can cause major troubles.  We read back from
14263          * every mailbox register write to force the writes to be
14264          * posted to the chip in order.
14265          */
14266         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14267             !tg3_flag(tp, PCI_EXPRESS))
14268                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14269
14270         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14271                              &tp->pci_cacheline_sz);
14272         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14273                              &tp->pci_lat_timer);
14274         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14275             tp->pci_lat_timer < 64) {
14276                 tp->pci_lat_timer = 64;
14277                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14278                                       tp->pci_lat_timer);
14279         }
14280
14281         /* Important! -- It is critical that the PCI-X hw workaround
14282          * situation is decided before the first MMIO register access.
14283          */
14284         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14285                 /* 5700 BX chips need to have their TX producer index
14286                  * mailboxes written twice to workaround a bug.
14287                  */
14288                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14289
14290                 /* If we are in PCI-X mode, enable register write workaround.
14291                  *
14292                  * The workaround is to use indirect register accesses
14293                  * for all chip writes not to mailbox registers.
14294                  */
14295                 if (tg3_flag(tp, PCIX_MODE)) {
14296                         u32 pm_reg;
14297
14298                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14299
14300                         /* The chip can have it's power management PCI config
14301                          * space registers clobbered due to this bug.
14302                          * So explicitly force the chip into D0 here.
14303                          */
14304                         pci_read_config_dword(tp->pdev,
14305                                               tp->pm_cap + PCI_PM_CTRL,
14306                                               &pm_reg);
14307                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14308                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14309                         pci_write_config_dword(tp->pdev,
14310                                                tp->pm_cap + PCI_PM_CTRL,
14311                                                pm_reg);
14312
14313                         /* Also, force SERR#/PERR# in PCI command. */
14314                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14315                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14316                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14317                 }
14318         }
14319
14320         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14321                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14322         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14323                 tg3_flag_set(tp, PCI_32BIT);
14324
14325         /* Chip-specific fixup from Broadcom driver */
14326         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14327             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14328                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14329                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14330         }
14331
14332         /* Default fast path register access methods */
14333         tp->read32 = tg3_read32;
14334         tp->write32 = tg3_write32;
14335         tp->read32_mbox = tg3_read32;
14336         tp->write32_mbox = tg3_write32;
14337         tp->write32_tx_mbox = tg3_write32;
14338         tp->write32_rx_mbox = tg3_write32;
14339
14340         /* Various workaround register access methods */
14341         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14342                 tp->write32 = tg3_write_indirect_reg32;
14343         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14344                  (tg3_flag(tp, PCI_EXPRESS) &&
14345                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14346                 /*
14347                  * Back to back register writes can cause problems on these
14348                  * chips, the workaround is to read back all reg writes
14349                  * except those to mailbox regs.
14350                  *
14351                  * See tg3_write_indirect_reg32().
14352                  */
14353                 tp->write32 = tg3_write_flush_reg32;
14354         }
14355
14356         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14357                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14358                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14359                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14360         }
14361
14362         if (tg3_flag(tp, ICH_WORKAROUND)) {
14363                 tp->read32 = tg3_read_indirect_reg32;
14364                 tp->write32 = tg3_write_indirect_reg32;
14365                 tp->read32_mbox = tg3_read_indirect_mbox;
14366                 tp->write32_mbox = tg3_write_indirect_mbox;
14367                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14368                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14369
14370                 iounmap(tp->regs);
14371                 tp->regs = NULL;
14372
14373                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14374                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14375                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14376         }
14377         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14378                 tp->read32_mbox = tg3_read32_mbox_5906;
14379                 tp->write32_mbox = tg3_write32_mbox_5906;
14380                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14381                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14382         }
14383
14384         if (tp->write32 == tg3_write_indirect_reg32 ||
14385             (tg3_flag(tp, PCIX_MODE) &&
14386              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14387               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14388                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14389
14390         /* The memory arbiter has to be enabled in order for SRAM accesses
14391          * to succeed.  Normally on powerup the tg3 chip firmware will make
14392          * sure it is enabled, but other entities such as system netboot
14393          * code might disable it.
14394          */
14395         val = tr32(MEMARB_MODE);
14396         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14397
14398         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14399         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14400             tg3_flag(tp, 5780_CLASS)) {
14401                 if (tg3_flag(tp, PCIX_MODE)) {
14402                         pci_read_config_dword(tp->pdev,
14403                                               tp->pcix_cap + PCI_X_STATUS,
14404                                               &val);
14405                         tp->pci_fn = val & 0x7;
14406                 }
14407         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14408                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14409                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14410                     NIC_SRAM_CPMUSTAT_SIG) {
14411                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14412                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14413                 }
14414         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14415                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14416                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14417                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14418                     NIC_SRAM_CPMUSTAT_SIG) {
14419                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14420                                      TG3_CPMU_STATUS_FSHFT_5719;
14421                 }
14422         }
14423
14424         /* Get eeprom hw config before calling tg3_set_power_state().
14425          * In particular, the TG3_FLAG_IS_NIC flag must be
14426          * determined before calling tg3_set_power_state() so that
14427          * we know whether or not to switch out of Vaux power.
14428          * When the flag is set, it means that GPIO1 is used for eeprom
14429          * write protect and also implies that it is a LOM where GPIOs
14430          * are not used to switch power.
14431          */
14432         tg3_get_eeprom_hw_cfg(tp);
14433
14434         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14435                 tg3_flag_clear(tp, TSO_CAPABLE);
14436                 tg3_flag_clear(tp, TSO_BUG);
14437                 tp->fw_needed = NULL;
14438         }
14439
14440         if (tg3_flag(tp, ENABLE_APE)) {
14441                 /* Allow reads and writes to the
14442                  * APE register and memory space.
14443                  */
14444                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14445                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14446                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14447                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14448                                        pci_state_reg);
14449
14450                 tg3_ape_lock_init(tp);
14451         }
14452
14453         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14454             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14455             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14456             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14457             tg3_flag(tp, 57765_PLUS))
14458                 tg3_flag_set(tp, CPMU_PRESENT);
14459
14460         /* Set up tp->grc_local_ctrl before calling
14461          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14462          * will bring 5700's external PHY out of reset.
14463          * It is also used as eeprom write protect on LOMs.
14464          */
14465         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14466         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14467             tg3_flag(tp, EEPROM_WRITE_PROT))
14468                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14469                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14470         /* Unused GPIO3 must be driven as output on 5752 because there
14471          * are no pull-up resistors on unused GPIO pins.
14472          */
14473         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14474                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14475
14476         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14477             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14478             tg3_flag(tp, 57765_CLASS))
14479                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14480
14481         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14482             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14483                 /* Turn off the debug UART. */
14484                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14485                 if (tg3_flag(tp, IS_NIC))
14486                         /* Keep VMain power. */
14487                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14488                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14489         }
14490
14491         /* Switch out of Vaux if it is a NIC */
14492         tg3_pwrsrc_switch_to_vmain(tp);
14493
14494         /* Derive initial jumbo mode from MTU assigned in
14495          * ether_setup() via the alloc_etherdev() call
14496          */
14497         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14498                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14499
14500         /* Determine WakeOnLan speed to use. */
14501         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14502             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14503             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14504             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14505                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14506         } else {
14507                 tg3_flag_set(tp, WOL_SPEED_100MB);
14508         }
14509
14510         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14511                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14512
14513         /* A few boards don't want Ethernet@WireSpeed phy feature */
14514         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14515             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14516              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14517              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14518             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14519             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14520                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14521
14522         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14523             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14524                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14525         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14526                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14527
14528         if (tg3_flag(tp, 5705_PLUS) &&
14529             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14530             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14531             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14532             !tg3_flag(tp, 57765_PLUS)) {
14533                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14534                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14535                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14536                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14537                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14538                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14539                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14540                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14541                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14542                 } else
14543                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14544         }
14545
14546         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14547             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14548                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14549                 if (tp->phy_otp == 0)
14550                         tp->phy_otp = TG3_OTP_DEFAULT;
14551         }
14552
14553         if (tg3_flag(tp, CPMU_PRESENT))
14554                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14555         else
14556                 tp->mi_mode = MAC_MI_MODE_BASE;
14557
14558         tp->coalesce_mode = 0;
14559         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14560             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14561                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14562
14563         /* Set these bits to enable statistics workaround. */
14564         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14565             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14566             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14567                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14568                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14569         }
14570
14571         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14572             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14573                 tg3_flag_set(tp, USE_PHYLIB);
14574
14575         err = tg3_mdio_init(tp);
14576         if (err)
14577                 return err;
14578
14579         /* Initialize data/descriptor byte/word swapping. */
14580         val = tr32(GRC_MODE);
14581         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14582                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14583                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14584                         GRC_MODE_B2HRX_ENABLE |
14585                         GRC_MODE_HTX2B_ENABLE |
14586                         GRC_MODE_HOST_STACKUP);
14587         else
14588                 val &= GRC_MODE_HOST_STACKUP;
14589
14590         tw32(GRC_MODE, val | tp->grc_mode);
14591
14592         tg3_switch_clocks(tp);
14593
14594         /* Clear this out for sanity. */
14595         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14596
14597         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14598                               &pci_state_reg);
14599         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14600             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14601                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14602
14603                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14604                     chiprevid == CHIPREV_ID_5701_B0 ||
14605                     chiprevid == CHIPREV_ID_5701_B2 ||
14606                     chiprevid == CHIPREV_ID_5701_B5) {
14607                         void __iomem *sram_base;
14608
14609                         /* Write some dummy words into the SRAM status block
14610                          * area, see if it reads back correctly.  If the return
14611                          * value is bad, force enable the PCIX workaround.
14612                          */
14613                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14614
14615                         writel(0x00000000, sram_base);
14616                         writel(0x00000000, sram_base + 4);
14617                         writel(0xffffffff, sram_base + 4);
14618                         if (readl(sram_base) != 0x00000000)
14619                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14620                 }
14621         }
14622
14623         udelay(50);
14624         tg3_nvram_init(tp);
14625
14626         grc_misc_cfg = tr32(GRC_MISC_CFG);
14627         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14628
14629         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14630             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14631              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14632                 tg3_flag_set(tp, IS_5788);
14633
14634         if (!tg3_flag(tp, IS_5788) &&
14635             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14636                 tg3_flag_set(tp, TAGGED_STATUS);
14637         if (tg3_flag(tp, TAGGED_STATUS)) {
14638                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14639                                       HOSTCC_MODE_CLRTICK_TXBD);
14640
14641                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14642                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14643                                        tp->misc_host_ctrl);
14644         }
14645
14646         /* Preserve the APE MAC_MODE bits */
14647         if (tg3_flag(tp, ENABLE_APE))
14648                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14649         else
14650                 tp->mac_mode = 0;
14651
14652         /* these are limited to 10/100 only */
14653         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14654              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14655             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14656              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14657              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14658               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14659               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14660             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14661              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14662               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14663               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14664             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14665             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14666             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14667             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14668                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14669
14670         err = tg3_phy_probe(tp);
14671         if (err) {
14672                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14673                 /* ... but do not return immediately ... */
14674                 tg3_mdio_fini(tp);
14675         }
14676
14677         tg3_read_vpd(tp);
14678         tg3_read_fw_ver(tp);
14679
14680         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14681                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14682         } else {
14683                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14684                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14685                 else
14686                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14687         }
14688
14689         /* 5700 {AX,BX} chips have a broken status block link
14690          * change bit implementation, so we must use the
14691          * status register in those cases.
14692          */
14693         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14694                 tg3_flag_set(tp, USE_LINKCHG_REG);
14695         else
14696                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14697
14698         /* The led_ctrl is set during tg3_phy_probe, here we might
14699          * have to force the link status polling mechanism based
14700          * upon subsystem IDs.
14701          */
14702         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14703             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14704             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14705                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14706                 tg3_flag_set(tp, USE_LINKCHG_REG);
14707         }
14708
14709         /* For all SERDES we poll the MAC status register. */
14710         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14711                 tg3_flag_set(tp, POLL_SERDES);
14712         else
14713                 tg3_flag_clear(tp, POLL_SERDES);
14714
14715         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14716         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14717         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14718             tg3_flag(tp, PCIX_MODE)) {
14719                 tp->rx_offset = NET_SKB_PAD;
14720 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14721                 tp->rx_copy_thresh = ~(u16)0;
14722 #endif
14723         }
14724
14725         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14726         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14727         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14728
14729         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14730
14731         /* Increment the rx prod index on the rx std ring by at most
14732          * 8 for these chips to workaround hw errata.
14733          */
14734         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14735             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14736             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14737                 tp->rx_std_max_post = 8;
14738
14739         if (tg3_flag(tp, ASPM_WORKAROUND))
14740                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14741                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14742
14743         return err;
14744 }
14745
14746 #ifdef CONFIG_SPARC
14747 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14748 {
14749         struct net_device *dev = tp->dev;
14750         struct pci_dev *pdev = tp->pdev;
14751         struct device_node *dp = pci_device_to_OF_node(pdev);
14752         const unsigned char *addr;
14753         int len;
14754
14755         addr = of_get_property(dp, "local-mac-address", &len);
14756         if (addr && len == 6) {
14757                 memcpy(dev->dev_addr, addr, 6);
14758                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14759                 return 0;
14760         }
14761         return -ENODEV;
14762 }
14763
14764 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14765 {
14766         struct net_device *dev = tp->dev;
14767
14768         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14769         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14770         return 0;
14771 }
14772 #endif
14773
14774 static int __devinit tg3_get_device_address(struct tg3 *tp)
14775 {
14776         struct net_device *dev = tp->dev;
14777         u32 hi, lo, mac_offset;
14778         int addr_ok = 0;
14779
14780 #ifdef CONFIG_SPARC
14781         if (!tg3_get_macaddr_sparc(tp))
14782                 return 0;
14783 #endif
14784
14785         mac_offset = 0x7c;
14786         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14787             tg3_flag(tp, 5780_CLASS)) {
14788                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14789                         mac_offset = 0xcc;
14790                 if (tg3_nvram_lock(tp))
14791                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14792                 else
14793                         tg3_nvram_unlock(tp);
14794         } else if (tg3_flag(tp, 5717_PLUS)) {
14795                 if (tp->pci_fn & 1)
14796                         mac_offset = 0xcc;
14797                 if (tp->pci_fn > 1)
14798                         mac_offset += 0x18c;
14799         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14800                 mac_offset = 0x10;
14801
14802         /* First try to get it from MAC address mailbox. */
14803         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14804         if ((hi >> 16) == 0x484b) {
14805                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14806                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14807
14808                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14809                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14810                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14811                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14812                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14813
14814                 /* Some old bootcode may report a 0 MAC address in SRAM */
14815                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14816         }
14817         if (!addr_ok) {
14818                 /* Next, try NVRAM. */
14819                 if (!tg3_flag(tp, NO_NVRAM) &&
14820                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14821                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14822                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14823                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14824                 }
14825                 /* Finally just fetch it out of the MAC control regs. */
14826                 else {
14827                         hi = tr32(MAC_ADDR_0_HIGH);
14828                         lo = tr32(MAC_ADDR_0_LOW);
14829
14830                         dev->dev_addr[5] = lo & 0xff;
14831                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14832                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14833                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14834                         dev->dev_addr[1] = hi & 0xff;
14835                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14836                 }
14837         }
14838
14839         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14840 #ifdef CONFIG_SPARC
14841                 if (!tg3_get_default_macaddr_sparc(tp))
14842                         return 0;
14843 #endif
14844                 return -EINVAL;
14845         }
14846         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14847         return 0;
14848 }
14849
/* DMA burst "goal" values used by tg3_calc_dma_bndry(): break bursts
 * at every cache-line boundary, or allow bursts spanning multiple
 * cache lines.  A goal of 0 means no boundary restriction is needed.
 */
#define BOUNDARY_SINGLE_CACHELINE       1
#define BOUNDARY_MULTI_CACHELINE        2
14852
14853 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14854 {
14855         int cacheline_size;
14856         u8 byte;
14857         int goal;
14858
14859         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14860         if (byte == 0)
14861                 cacheline_size = 1024;
14862         else
14863                 cacheline_size = (int) byte * 4;
14864
14865         /* On 5703 and later chips, the boundary bits have no
14866          * effect.
14867          */
14868         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14869             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14870             !tg3_flag(tp, PCI_EXPRESS))
14871                 goto out;
14872
14873 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14874         goal = BOUNDARY_MULTI_CACHELINE;
14875 #else
14876 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14877         goal = BOUNDARY_SINGLE_CACHELINE;
14878 #else
14879         goal = 0;
14880 #endif
14881 #endif
14882
14883         if (tg3_flag(tp, 57765_PLUS)) {
14884                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14885                 goto out;
14886         }
14887
14888         if (!goal)
14889                 goto out;
14890
14891         /* PCI controllers on most RISC systems tend to disconnect
14892          * when a device tries to burst across a cache-line boundary.
14893          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14894          *
14895          * Unfortunately, for PCI-E there are only limited
14896          * write-side controls for this, and thus for reads
14897          * we will still get the disconnects.  We'll also waste
14898          * these PCI cycles for both read and write for chips
14899          * other than 5700 and 5701 which do not implement the
14900          * boundary bits.
14901          */
14902         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14903                 switch (cacheline_size) {
14904                 case 16:
14905                 case 32:
14906                 case 64:
14907                 case 128:
14908                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14909                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14910                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14911                         } else {
14912                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14913                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14914                         }
14915                         break;
14916
14917                 case 256:
14918                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14919                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14920                         break;
14921
14922                 default:
14923                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14924                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14925                         break;
14926                 }
14927         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14928                 switch (cacheline_size) {
14929                 case 16:
14930                 case 32:
14931                 case 64:
14932                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14933                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14934                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14935                                 break;
14936                         }
14937                         /* fallthrough */
14938                 case 128:
14939                 default:
14940                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14941                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14942                         break;
14943                 }
14944         } else {
14945                 switch (cacheline_size) {
14946                 case 16:
14947                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14948                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14949                                         DMA_RWCTRL_WRITE_BNDRY_16);
14950                                 break;
14951                         }
14952                         /* fallthrough */
14953                 case 32:
14954                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14955                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14956                                         DMA_RWCTRL_WRITE_BNDRY_32);
14957                                 break;
14958                         }
14959                         /* fallthrough */
14960                 case 64:
14961                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14962                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14963                                         DMA_RWCTRL_WRITE_BNDRY_64);
14964                                 break;
14965                         }
14966                         /* fallthrough */
14967                 case 128:
14968                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14969                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14970                                         DMA_RWCTRL_WRITE_BNDRY_128);
14971                                 break;
14972                         }
14973                         /* fallthrough */
14974                 case 256:
14975                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14976                                 DMA_RWCTRL_WRITE_BNDRY_256);
14977                         break;
14978                 case 512:
14979                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14980                                 DMA_RWCTRL_WRITE_BNDRY_512);
14981                         break;
14982                 case 1024:
14983                 default:
14984                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14985                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14986                         break;
14987                 }
14988         }
14989
14990 out:
14991         return val;
14992 }
14993
/* tg3_do_test_dma - run one DMA transfer between host memory and NIC SRAM
 * @tp:        device state
 * @buf:       kernel virtual address of the coherent test buffer
 * @buf_dma:   bus address of the same buffer
 * @size:      number of bytes to transfer
 * @to_device: non-zero = host -> chip via the read DMA engine,
 *             zero     = chip -> host via the write DMA engine
 *
 * Builds a single internal buffer descriptor in NIC SRAM describing
 * the transfer, kicks the appropriate DMA FTQ, and polls the matching
 * completion FIFO.  Returns 0 when the descriptor completes within
 * ~4ms (40 x 100us), -ENODEV on timeout.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
        struct tg3_internal_buffer_desc test_desc;
        u32 sram_dma_descs;
        int i, ret;

        sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

        /* Clear the completion FIFOs and DMA engine status, and quiesce
         * the buffer manager / FTQs before programming the descriptor.
         */
        tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
        tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
        tw32(RDMAC_STATUS, 0);
        tw32(WDMAC_STATUS, 0);

        tw32(BUFMGR_MODE, 0);
        tw32(FTQ_RESET, 0);

        /* Descriptor: host buffer address and the fixed on-chip mbuf
         * location (0x2100) used as the SRAM end of the transfer.
         */
        test_desc.addr_hi = ((u64) buf_dma) >> 32;
        test_desc.addr_lo = buf_dma & 0xffffffff;
        test_desc.nic_mbuf = 0x00002100;
        test_desc.len = size;

        /*
         * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
         * the *second* time the tg3 driver was getting loaded after an
         * initial scan.
         *
         * Broadcom tells me:
         *   ...the DMA engine is connected to the GRC block and a DMA
         *   reset may affect the GRC block in some unpredictable way...
         *   The behavior of resets to individual blocks has not been tested.
         *
         * Broadcom noted the GRC reset will also reset all sub-components.
         */
        if (to_device) {
                /* Host -> chip: enable the read DMA engine. */
                test_desc.cqid_sqid = (13 << 8) | 2;

                tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
                udelay(40);
        } else {
                /* Chip -> host: enable the write DMA engine. */
                test_desc.cqid_sqid = (16 << 8) | 7;

                tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
                udelay(40);
        }
        test_desc.flags = 0x00000005;

        /* Copy the descriptor word-by-word into NIC SRAM through the
         * PCI memory window.
         */
        for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
                u32 val;

                val = *(((u32 *)&test_desc) + i);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
                                       sram_dma_descs + (i * sizeof(u32)));
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
        }
        pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* Enqueue the descriptor on the DMA engine's input FTQ. */
        if (to_device)
                tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
        else
                tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

        /* Poll the completion FIFO for our descriptor's SRAM address. */
        ret = -ENODEV;
        for (i = 0; i < 40; i++) {
                u32 val;

                if (to_device)
                        val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
                else
                        val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
                if ((val & 0xffff) == sram_dma_descs) {
                        ret = 0;
                        break;
                }

                udelay(100);
        }

        return ret;
}
15073
/* Size of the coherent host buffer used by tg3_test_dma(). */
#define TEST_BUFFER_SIZE        0x2000

/* Host bridges known to expose the 5700/5701 write DMA bug even when
 * the loopback test in tg3_test_dma() passes; on these we force the
 * conservative 16-byte write boundary anyway.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
        { },
};
15080
/* tg3_test_dma - derive and validate tp->dma_rwctrl
 * @tp: device state
 *
 * Computes the DMA read/write control register value from the chip
 * family and bus type (PCIe / PCI-X / plain PCI).  On 5700/5701 only,
 * it then runs a host->chip->host DMA loopback against a coherent
 * buffer with the maximum write burst size to expose the write DMA
 * erratum, tightening the write boundary to 16 bytes if corruption is
 * observed.  Returns 0 on success or a negative errno.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
        int ret = 0;

        buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
                                 &buf_dma, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto out_nofree;
        }

        /* Base value: PCI write/read command codes. */
        tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
                          (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

        if (tg3_flag(tp, 57765_PLUS))
                goto out;

        /* Fold in per-bus-type watermark bits. */
        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
        } else if (!tg3_flag(tp, PCIX_MODE)) {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
                        tp->dma_rwctrl |= 0x003f0000;
                else
                        tp->dma_rwctrl |= 0x003f000f;
        } else {
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
                        u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
                        u32 read_water = 0x7;

                        /* If the 5704 is behind the EPB bridge, we can
                         * do the less restrictive ONE_DMA workaround for
                         * better performance.
                         */
                        if (tg3_flag(tp, 40BIT_DMA_BUG) &&
                            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                                tp->dma_rwctrl |= 0x8000;
                        else if (ccval == 0x6 || ccval == 0x7)
                                tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

                        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
                                read_water = 4;
                        /* Set bit 23 to enable PCIX hw bug fix */
                        tp->dma_rwctrl |=
                                (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
                                (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
                                (1 << 23);
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
                        /* 5780 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00144000;
                } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
                        /* 5714 always in PCIX mode */
                        tp->dma_rwctrl |= 0x00148000;
                } else {
                        tp->dma_rwctrl |= 0x001b000f;
                }
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
                tp->dma_rwctrl &= 0xfffffff0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                /* Remove this if it causes problems for some boards. */
                tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

                /* On 5700/5701 chips, we need to set this bit.
                 * Otherwise the chip will issue cacheline transactions
                 * to streamable DMA memory with not all the byte
                 * enables turned on.  This is an error on several
                 * RISC PCI controllers, in particular sparc64.
                 *
                 * On 5703/5704 chips, this bit has been reassigned
                 * a different meaning.  In particular, it is used
                 * on those chips to enable a PCI-X workaround.
                 */
                tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
        }

        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
        /* Unneeded, already done by tg3_get_invariants.  */
        tg3_switch_clocks(tp);
#endif

        /* Only 5700/5701 need the loopback verification below. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                goto out;

        /* It is best to perform DMA test with maximum write burst size
         * to expose the 5700/5701 write DMA bug.
         */
        saved_dma_rwctrl = tp->dma_rwctrl;
        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
        tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

        /* Runs at most twice: once unrestricted and, on a miscompare,
         * once more with the 16-byte write boundary applied.
         */
        while (1) {
                u32 *p = buf, i;

                /* Fill the buffer with a known pattern. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
                        p[i] = i;

                /* Send the buffer to the chip. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
                if (ret) {
                        dev_err(&tp->pdev->dev,
                                "%s: Buffer write failed. err = %d\n",
                                __func__, ret);
                        break;
                }

#if 0
                /* validate data reached card RAM correctly. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        u32 val;
                        tg3_read_mem(tp, 0x2100 + (i*4), &val);
                        if (le32_to_cpu(val) != p[i]) {
                                dev_err(&tp->pdev->dev,
                                        "%s: Buffer corrupted on device! "
                                        "(%d != %d)\n", __func__, val, i);
                                /* ret = -ENODEV here? */
                        }
                        p[i] = 0;
                }
#endif
                /* Now read it back. */
                ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
                if (ret) {
                        dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
                                "err = %d\n", __func__, ret);
                        break;
                }

                /* Verify it. */
                for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
                        if (p[i] == i)
                                continue;

                        /* First miscompare: retry with the 16-byte
                         * boundary.  Second miscompare: give up.
                         */
                        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
                            DMA_RWCTRL_WRITE_BNDRY_16) {
                                tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                                tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
                                break;
                        } else {
                                dev_err(&tp->pdev->dev,
                                        "%s: Buffer corrupted on read back! "
                                        "(%d != %d)\n", __func__, p[i], i);
                                ret = -ENODEV;
                                goto out;
                        }
                }

                if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
                        /* Success. */
                        ret = 0;
                        break;
                }
        }
        if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
            DMA_RWCTRL_WRITE_BNDRY_16) {
                /* DMA test passed without adjusting DMA boundary,
                 * now look for chipsets that are known to expose the
                 * DMA bug without failing the test.
                 */
                if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
                        tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
                        tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
                } else {
                        /* Safe to use the calculated DMA boundary. */
                        tp->dma_rwctrl = saved_dma_rwctrl;
                }

                tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
        }

out:
        dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
        return ret;
}
15270
15271 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15272 {
15273         if (tg3_flag(tp, 57765_PLUS)) {
15274                 tp->bufmgr_config.mbuf_read_dma_low_water =
15275                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15276                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15277                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15278                 tp->bufmgr_config.mbuf_high_water =
15279                         DEFAULT_MB_HIGH_WATER_57765;
15280
15281                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15282                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15283                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15284                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15285                 tp->bufmgr_config.mbuf_high_water_jumbo =
15286                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15287         } else if (tg3_flag(tp, 5705_PLUS)) {
15288                 tp->bufmgr_config.mbuf_read_dma_low_water =
15289                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15290                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15291                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15292                 tp->bufmgr_config.mbuf_high_water =
15293                         DEFAULT_MB_HIGH_WATER_5705;
15294                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15295                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15296                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15297                         tp->bufmgr_config.mbuf_high_water =
15298                                 DEFAULT_MB_HIGH_WATER_5906;
15299                 }
15300
15301                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15302                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15303                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15304                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15305                 tp->bufmgr_config.mbuf_high_water_jumbo =
15306                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15307         } else {
15308                 tp->bufmgr_config.mbuf_read_dma_low_water =
15309                         DEFAULT_MB_RDMA_LOW_WATER;
15310                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15311                         DEFAULT_MB_MACRX_LOW_WATER;
15312                 tp->bufmgr_config.mbuf_high_water =
15313                         DEFAULT_MB_HIGH_WATER;
15314
15315                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15316                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15317                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15318                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15319                 tp->bufmgr_config.mbuf_high_water_jumbo =
15320                         DEFAULT_MB_HIGH_WATER_JUMBO;
15321         }
15322
15323         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15324         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15325 }
15326
15327 static char * __devinit tg3_phy_string(struct tg3 *tp)
15328 {
15329         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15330         case TG3_PHY_ID_BCM5400:        return "5400";
15331         case TG3_PHY_ID_BCM5401:        return "5401";
15332         case TG3_PHY_ID_BCM5411:        return "5411";
15333         case TG3_PHY_ID_BCM5701:        return "5701";
15334         case TG3_PHY_ID_BCM5703:        return "5703";
15335         case TG3_PHY_ID_BCM5704:        return "5704";
15336         case TG3_PHY_ID_BCM5705:        return "5705";
15337         case TG3_PHY_ID_BCM5750:        return "5750";
15338         case TG3_PHY_ID_BCM5752:        return "5752";
15339         case TG3_PHY_ID_BCM5714:        return "5714";
15340         case TG3_PHY_ID_BCM5780:        return "5780";
15341         case TG3_PHY_ID_BCM5755:        return "5755";
15342         case TG3_PHY_ID_BCM5787:        return "5787";
15343         case TG3_PHY_ID_BCM5784:        return "5784";
15344         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15345         case TG3_PHY_ID_BCM5906:        return "5906";
15346         case TG3_PHY_ID_BCM5761:        return "5761";
15347         case TG3_PHY_ID_BCM5718C:       return "5718C";
15348         case TG3_PHY_ID_BCM5718S:       return "5718S";
15349         case TG3_PHY_ID_BCM57765:       return "57765";
15350         case TG3_PHY_ID_BCM5719C:       return "5719C";
15351         case TG3_PHY_ID_BCM5720C:       return "5720C";
15352         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15353         case 0:                 return "serdes";
15354         default:                return "unknown";
15355         }
15356 }
15357
15358 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15359 {
15360         if (tg3_flag(tp, PCI_EXPRESS)) {
15361                 strcpy(str, "PCI Express");
15362                 return str;
15363         } else if (tg3_flag(tp, PCIX_MODE)) {
15364                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15365
15366                 strcpy(str, "PCIX:");
15367
15368                 if ((clock_ctrl == 7) ||
15369                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15370                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15371                         strcat(str, "133MHz");
15372                 else if (clock_ctrl == 0)
15373                         strcat(str, "33MHz");
15374                 else if (clock_ctrl == 2)
15375                         strcat(str, "50MHz");
15376                 else if (clock_ctrl == 4)
15377                         strcat(str, "66MHz");
15378                 else if (clock_ctrl == 6)
15379                         strcat(str, "100MHz");
15380         } else {
15381                 strcpy(str, "PCI:");
15382                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15383                         strcat(str, "66MHz");
15384                 else
15385                         strcat(str, "33MHz");
15386         }
15387         if (tg3_flag(tp, PCI_32BIT))
15388                 strcat(str, ":32-bit");
15389         else
15390                 strcat(str, ":64-bit");
15391         return str;
15392 }
15393
15394 static void __devinit tg3_init_coal(struct tg3 *tp)
15395 {
15396         struct ethtool_coalesce *ec = &tp->coal;
15397
15398         memset(ec, 0, sizeof(*ec));
15399         ec->cmd = ETHTOOL_GCOALESCE;
15400         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15401         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15402         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15403         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15404         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15405         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15406         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15407         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15408         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15409
15410         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15411                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15412                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15413                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15414                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15415                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15416         }
15417
15418         if (tg3_flag(tp, 5705_PLUS)) {
15419                 ec->rx_coalesce_usecs_irq = 0;
15420                 ec->tx_coalesce_usecs_irq = 0;
15421                 ec->stats_block_coalesce_usecs = 0;
15422         }
15423 }
15424
/* tg3_init_one - PCI probe callback.
 *
 * Enables and maps the device, reads chip invariants, chooses the DMA
 * masks, sets up the per-vector mailbox layout, and registers the
 * net_device.  Any failure unwinds the partial setup through the
 * goto-cleanup chain at the bottom of the function.
 */
static int __devinit tg3_init_one(struct pci_dev *pdev,
                                  const struct pci_device_id *ent)
{
        struct net_device *dev;
        struct tg3 *tp;
        int i, err, pm_cap;
        u32 sndmbx, rcvmbx, intmbx;
        char str[40];
        u64 dma_mask, persist_dma_mask;
        netdev_features_t features = 0;

        printk_once(KERN_INFO "%s\n", version);

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
                return err;
        }

        err = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (err) {
                dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
                goto err_out_disable_pdev;
        }

        pci_set_master(pdev);

        /* Find power-management capability. */
        pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (pm_cap == 0) {
                dev_err(&pdev->dev,
                        "Cannot find Power Management capability, aborting\n");
                err = -EIO;
                goto err_out_free_res;
        }

        /* Make sure the chip is fully powered before we touch it. */
        err = pci_set_power_state(pdev, PCI_D0);
        if (err) {
                dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
                goto err_out_free_res;
        }

        /* Allocate the netdev with queues for the maximum number of
         * interrupt vectors this driver can use.
         */
        dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
        if (!dev) {
                err = -ENOMEM;
                goto err_out_power_down;
        }

        SET_NETDEV_DEV(dev, &pdev->dev);

        tp = netdev_priv(dev);
        tp->pdev = pdev;
        tp->dev = dev;
        tp->pm_cap = pm_cap;
        tp->rx_mode = TG3_DEF_RX_MODE;
        tp->tx_mode = TG3_DEF_TX_MODE;

        if (tg3_debug > 0)
                tp->msg_enable = tg3_debug;
        else
                tp->msg_enable = TG3_DEF_MSG_ENABLE;

        /* The word/byte swap controls here control register access byte
         * swapping.  DMA data byte swapping is controlled in the GRC_MODE
         * setting below.
         */
        tp->misc_host_ctrl =
                MISC_HOST_CTRL_MASK_PCI_INT |
                MISC_HOST_CTRL_WORD_SWAP |
                MISC_HOST_CTRL_INDIR_ACCESS |
                MISC_HOST_CTRL_PCISTATE_RW;

        /* The NONFRM (non-frame) byte/word swap controls take effect
         * on descriptor entries, anything which isn't packet data.
         *
         * The StrongARM chips on the board (one for tx, one for rx)
         * are running in big-endian mode.
         */
        tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
                        GRC_MODE_WSWAP_NONFRM_DATA);
#ifdef __BIG_ENDIAN
        tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
        spin_lock_init(&tp->lock);
        spin_lock_init(&tp->indirect_lock);
        INIT_WORK(&tp->reset_task, tg3_reset_task);

        tp->regs = pci_ioremap_bar(pdev, BAR_0);
        if (!tp->regs) {
                dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
                err = -ENOMEM;
                goto err_out_free_dev;
        }

        /* These devices carry an APE (Application Processing Engine);
         * its register block lives behind BAR 2 and must be mapped too.
         */
        if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
            tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
            tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
                tg3_flag_set(tp, ENABLE_APE);
                tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
                if (!tp->aperegs) {
                        dev_err(&pdev->dev,
                                "Cannot map APE registers, aborting\n");
                        err = -ENOMEM;
                        goto err_out_iounmap;
                }
        }

        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;

        dev->ethtool_ops = &tg3_ethtool_ops;
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
        dev->netdev_ops = &tg3_netdev_ops;
        dev->irq = pdev->irq;

        err = tg3_get_invariants(tp);
        if (err) {
                dev_err(&pdev->dev,
                        "Problem fetching invariants of chip, aborting\n");
                goto err_out_apeunmap;
        }

        /* The EPB bridge inside 5714, 5715, and 5780 and any
         * device behind the EPB cannot support DMA addresses > 40-bit.
         * On 64-bit systems with IOMMU, use 40-bit dma_mask.
         * On 64-bit systems without IOMMU, use 64-bit dma_mask and
         * do DMA address check in tg3_start_xmit().
         */
        if (tg3_flag(tp, IS_5788))
                persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
        else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
                persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
#ifdef CONFIG_HIGHMEM
                dma_mask = DMA_BIT_MASK(64);
#endif
        } else
                persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

        /* Configure DMA attributes. */
        if (dma_mask > DMA_BIT_MASK(32)) {
                err = pci_set_dma_mask(pdev, dma_mask);
                if (!err) {
                        features |= NETIF_F_HIGHDMA;
                        err = pci_set_consistent_dma_mask(pdev,
                                                          persist_dma_mask);
                        if (err < 0) {
                                dev_err(&pdev->dev, "Unable to obtain 64 bit "
                                        "DMA for consistent allocations\n");
                                goto err_out_apeunmap;
                        }
                }
        }
        /* Fall back to 32-bit DMA if the wider mask was refused. */
        if (err || dma_mask == DMA_BIT_MASK(32)) {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev,
                                "No usable DMA configuration, aborting\n");
                        goto err_out_apeunmap;
                }
        }

        tg3_init_bufmgr_config(tp);

        features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;

        /* 5700 B0 chips do not support checksumming correctly due
         * to hardware bugs.
         */
        if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
                features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;

                if (tg3_flag(tp, 5755_PLUS))
                        features |= NETIF_F_IPV6_CSUM;
        }

        /* TSO is on by default on chips that support hardware TSO.
         * Firmware TSO on older chips gives lower performance, so it
         * is off by default, but can be enabled using ethtool.
         */
        if ((tg3_flag(tp, HW_TSO_1) ||
             tg3_flag(tp, HW_TSO_2) ||
             tg3_flag(tp, HW_TSO_3)) &&
            (features & NETIF_F_IP_CSUM))
                features |= NETIF_F_TSO;
        if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
                if (features & NETIF_F_IPV6_CSUM)
                        features |= NETIF_F_TSO6;
                if (tg3_flag(tp, HW_TSO_3) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
                    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
                     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
                        features |= NETIF_F_TSO_ECN;
        }

        dev->features |= features;
        dev->vlan_features |= features;

        /*
         * Add loopback capability only for a subset of devices that support
         * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
         * loopback for the remaining devices.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT))
                /* Add the loopback capability */
                features |= NETIF_F_LOOPBACK;

        dev->hw_features |= features;

        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
            !tg3_flag(tp, TSO_CAPABLE) &&
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
                tg3_flag_set(tp, MAX_RXPEND_64);
                tp->rx_pending = 63;
        }

        err = tg3_get_device_address(tp);
        if (err) {
                dev_err(&pdev->dev,
                        "Could not obtain valid ethernet address, aborting\n");
                goto err_out_apeunmap;
        }

        /*
         * Reset chip in case UNDI or EFI driver did not shutdown
         * DMA self test will enable WDMAC and we'll see (spurious)
         * pending DMA on the PCI bus at that point.
         */
        if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
            (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        }

        err = tg3_test_dma(tp);
        if (err) {
                dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
                goto err_out_apeunmap;
        }

        /* Assign per-vector interrupt/consumer/producer mailbox
         * register offsets starting from the vector-0 mailboxes.
         */
        intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
        for (i = 0; i < tp->irq_max; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tnapi->tp = tp;
                tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

                tnapi->int_mbox = intmbx;
                /* NOTE(review): the first vectors use an 8-byte mailbox
                 * stride and later ones 4 bytes — presumably the hw
                 * register layout; confirm against the register map.
                 */
                if (i <= 4)
                        intmbx += 0x8;
                else
                        intmbx += 0x4;

                tnapi->consmbox = rcvmbx;
                tnapi->prodmbox = sndmbx;

                if (i)
                        tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
                else
                        tnapi->coal_now = HOSTCC_MODE_NOW;

                if (!tg3_flag(tp, SUPPORT_MSIX))
                        break;

                /*
                 * If we support MSIX, we'll be using RSS.  If we're using
                 * RSS, the first vector only handles link interrupts and the
                 * remaining vectors handle rx and tx interrupts.  Reuse the
                 * mailbox values for the next iteration.  The values we setup
                 * above are still useful for the single vectored mode.
                 */
                if (!i)
                        continue;

                rcvmbx += 0x8;

                /* Producer mailboxes alternate: -4 then +12 nets an
                 * 8-byte advance per pair of vectors.
                 */
                if (sndmbx & 0x4)
                        sndmbx -= 0x4;
                else
                        sndmbx += 0xc;
        }

        tg3_init_coal(tp);

        pci_set_drvdata(pdev, dev);

        if (tg3_flag(tp, 5717_PLUS)) {
                /* Resume a low-power mode */
                tg3_frob_aux_power(tp, false);
        }

        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting\n");
                goto err_out_apeunmap;
        }

        netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
                    tp->board_part_number,
                    tp->pci_chip_rev_id,
                    tg3_bus_string(tp, str),
                    dev->dev_addr);

        if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
                struct phy_device *phydev;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                netdev_info(dev,
                            "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
                            phydev->drv->name, dev_name(&phydev->dev));
        } else {
                char *ethtype;

                if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
                        ethtype = "10/100Base-TX";
                else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
                        ethtype = "1000Base-SX";
                else
                        ethtype = "10/100/1000Base-T";

                netdev_info(dev, "attached PHY is %s (%s Ethernet) "
                            "(WireSpeed[%d], EEE[%d])\n",
                            tg3_phy_string(tp), ethtype,
                            (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
                            (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
        }

        netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
                    (dev->features & NETIF_F_RXCSUM) != 0,
                    tg3_flag(tp, USE_LINKCHG_REG) != 0,
                    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
                    tg3_flag(tp, ENABLE_ASF) != 0,
                    tg3_flag(tp, TSO_CAPABLE) != 0);
        netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
                    tp->dma_rwctrl,
                    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
                    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);

        /* Snapshot config space so PCI error recovery can restore it. */
        pci_save_state(pdev);

        return 0;

err_out_apeunmap:
        if (tp->aperegs) {
                iounmap(tp->aperegs);
                tp->aperegs = NULL;
        }

err_out_iounmap:
        if (tp->regs) {
                iounmap(tp->regs);
                tp->regs = NULL;
        }

err_out_free_dev:
        free_netdev(dev);

err_out_power_down:
        pci_set_power_state(pdev, PCI_D3hot);

err_out_free_res:
        pci_release_regions(pdev);

err_out_disable_pdev:
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}
15801
15802 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15803 {
15804         struct net_device *dev = pci_get_drvdata(pdev);
15805
15806         if (dev) {
15807                 struct tg3 *tp = netdev_priv(dev);
15808
15809                 if (tp->fw)
15810                         release_firmware(tp->fw);
15811
15812                 tg3_reset_task_cancel(tp);
15813
15814                 if (tg3_flag(tp, USE_PHYLIB)) {
15815                         tg3_phy_fini(tp);
15816                         tg3_mdio_fini(tp);
15817                 }
15818
15819                 unregister_netdev(dev);
15820                 if (tp->aperegs) {
15821                         iounmap(tp->aperegs);
15822                         tp->aperegs = NULL;
15823                 }
15824                 if (tp->regs) {
15825                         iounmap(tp->regs);
15826                         tp->regs = NULL;
15827                 }
15828                 free_netdev(dev);
15829                 pci_release_regions(pdev);
15830                 pci_disable_device(pdev);
15831                 pci_set_drvdata(pdev, NULL);
15832         }
15833 }
15834
15835 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend callback: quiesce the data path, halt the chip,
 * and prepare it for power-down.  If the power-down preparation fails,
 * the device is restarted so the system keeps a working interface.
 */
static int tg3_suspend(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        /* Nothing to quiesce if the interface is down. */
        if (!netif_running(dev))
                return 0;

        /* Stop deferred work, the PHY and the data path before
         * touching the hardware.
         */
        tg3_reset_task_cancel(tp);
        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        tg3_full_lock(tp, 1);
        tg3_disable_ints(tp);
        tg3_full_unlock(tp);

        netif_device_detach(dev);

        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_flag_clear(tp, INIT_COMPLETE);
        tg3_full_unlock(tp);

        err = tg3_power_down_prepare(tp);
        if (err) {
                int err2;

                /* Recovery path: bring the hardware and data path back
                 * up since the suspend is being aborted.
                 */
                tg3_full_lock(tp, 0);

                tg3_flag_set(tp, INIT_COMPLETE);
                err2 = tg3_restart_hw(tp, 1);
                if (err2)
                        goto out;

                tp->timer.expires = jiffies + tp->timer_offset;
                add_timer(&tp->timer);

                netif_device_attach(dev);
                tg3_netif_start(tp);

out:
                tg3_full_unlock(tp);

                if (!err2)
                        tg3_phy_start(tp);
        }

        return err;
}
15889
/* System-sleep resume callback: re-attach the netdev, restart the
 * hardware and the periodic timer, and restart the PHY on success.
 */
static int tg3_resume(struct device *device)
{
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        /* Interface was down at suspend time; nothing to restart. */
        if (!netif_running(dev))
                return 0;

        netif_device_attach(dev);

        tg3_full_lock(tp, 0);

        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, 1);
        if (err)
                goto out;

        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);

        tg3_netif_start(tp);

out:
        tg3_full_unlock(tp);

        /* Restart the PHY outside the full lock. */
        if (!err)
                tg3_phy_start(tp);

        return err;
}
15922
/* Register the suspend/resume callbacks only when the kernel is built
 * with system-sleep support; otherwise the driver exposes no PM ops.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
15931
15932 /**
15933  * tg3_io_error_detected - called when PCI error is detected
15934  * @pdev: Pointer to PCI device
15935  * @state: The current pci connection state
15936  *
15937  * This function is called after a PCI bus error affecting
15938  * this device has been detected.
15939  */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

        netdev_info(netdev, "PCI I/O error detected\n");

        /* rtnl serializes us against open/close and other recovery. */
        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        /* Quiesce the PHY, the data path, and the periodic timer. */
        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        del_timer_sync(&tp->timer);

        /* Want to make sure that the reset task doesn't run */
        tg3_reset_task_cancel(tp);
        tg3_flag_clear(tp, TX_RECOVERY_PENDING);

        netif_device_detach(netdev);

        /* Clean up software state, even if MMIO is blocked */
        tg3_full_lock(tp, 0);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        tg3_full_unlock(tp);

done:
        /* Permanent failure: tell the core to give up on the device;
         * otherwise disable it and request a slot reset.
         */
        if (state == pci_channel_io_perm_failure)
                err = PCI_ERS_RESULT_DISCONNECT;
        else
                pci_disable_device(pdev);

        rtnl_unlock();

        return err;
}
15981
15982 /**
15983  * tg3_io_slot_reset - called after the pci bus has been reset.
15984  * @pdev: Pointer to PCI device
15985  *
15986  * Restart the card from scratch, as if from a cold-boot.
15987  * At this point, the card has exprienced a hard reset,
15988  * followed by fixups by BIOS, and has its config space
15989  * set up identically to what it was at cold boot.
15990  */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
        int err;

        rtnl_lock();

        if (pci_enable_device(pdev)) {
                netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
                goto done;
        }

        /* Restore the config space saved at probe time, then re-save
         * it so a subsequent recovery starts from a clean snapshot.
         */
        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        if (!netif_running(netdev)) {
                rc = PCI_ERS_RESULT_RECOVERED;
                goto done;
        }

        err = tg3_power_up(tp);
        if (err)
                goto done;

        rc = PCI_ERS_RESULT_RECOVERED;

done:
        rtnl_unlock();

        return rc;
}
16025
16026 /**
16027  * tg3_io_resume - called when traffic can start flowing again.
16028  * @pdev: Pointer to PCI device
16029  *
16030  * This callback is called when the error recovery driver tells
16031  * us that its OK to resume normal operation.
16032  */
static void tg3_io_resume(struct pci_dev *pdev)
{
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct tg3 *tp = netdev_priv(netdev);
        int err;

        rtnl_lock();

        if (!netif_running(netdev))
                goto done;

        /* Reinitialize the hardware under the full lock before
         * re-attaching the interface and restarting the timer.
         */
        tg3_full_lock(tp, 0);
        tg3_flag_set(tp, INIT_COMPLETE);
        err = tg3_restart_hw(tp, 1);
        tg3_full_unlock(tp);
        if (err) {
                netdev_err(netdev, "Cannot restart hardware after reset.\n");
                goto done;
        }

        netif_device_attach(netdev);

        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);

        tg3_netif_start(tp);

        tg3_phy_start(tp);

done:
        rtnl_unlock();
}
16065
/* PCI error-recovery (AER/EEH) entry points. */
static struct pci_error_handlers tg3_err_handler = {
        .error_detected = tg3_io_error_detected,
        .slot_reset     = tg3_io_slot_reset,
        .resume         = tg3_io_resume
};

/* Driver registration table: probe/remove, device IDs, PM and error
 * recovery hooks.
 */
static struct pci_driver tg3_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = tg3_pci_tbl,
        .probe          = tg3_init_one,
        .remove         = __devexit_p(tg3_remove_one),
        .err_handler    = &tg3_err_handler,
        .driver.pm      = TG3_PM_OPS,
};
16080
/* Module entry point: register the PCI driver. */
static int __init tg3_init(void)
{
        return pci_register_driver(&tg3_driver);
}

/* Module exit point: unregister the PCI driver. */
static void __exit tg3_cleanup(void)
{
        pci_unregister_driver(&tg3_driver);
}

module_init(tg3_init);
module_exit(tg3_cleanup);