/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
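
/* Example: tg3_flag(tp, JUMBO_CAPABLE) expands to a test of
 * TG3_FLAG_JUMBO_CAPABLE in tp->tg3_flags, so call sites name flags
 * without the TG3_FLAG_ prefix.
 */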

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     121
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "November 2, 2011"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100
#define TG3_RSS_INDIR_TBL_SIZE          128

/* Do not place this n-ring entries value into the tp struct itself;
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
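/* NEXT_TX is the '& (foo - 1)' form of '% TG3_TX_RING_SIZE' described
 * above; it is only correct because the ring size (512) is a power of two.
 */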

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       0
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX               4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

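/* Indirect register access: point the TG3PCI_REG_BASE_ADDR window in
 * PCI config space at the target register, then move the data through
 * TG3PCI_REG_DATA.  indirect_lock serializes users of the shared window.
 */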
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

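/* Posted write followed by a read back, so the write is flushed to the
 * chip before the caller proceeds.
 */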
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

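/* Mailbox writes in indirect mode.  The receive return ring and standard
 * ring producer mailboxes have dedicated config space aliases; all other
 * mailboxes go through the indirect window at offset 0x5600.
 */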
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

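/* TX mailbox write with chip workarounds: TXD_MBOX_HWBUG parts need the
 * doorbell written twice, and hosts that reorder mailbox writes need a
 * read back to flush the write.
 */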
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

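/* The 5906 exposes its mailboxes in the GRC mailbox region, so its
 * accessors add GRCMBOX_BASE to the mailbox offset.
 */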
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

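/* All register and mailbox traffic goes through per-device function
 * pointers (tp->write32 et al.) so that chips with posting or write
 * reordering quirks can be routed to the flushed or indirect variants
 * defined above.
 */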
#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

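/* NIC SRAM accessors: aim the memory window at 'off' via
 * TG3PCI_MEM_WIN_BASE_ADDR, move the word through TG3PCI_MEM_WIN_DATA,
 * then park the window back at zero.
 */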
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

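/* The APE is the NIC's management processor.  Its locks are hardware
 * semaphores shared between the driver and the management firmware;
 * each lock has a request/grant register pair.
 */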
static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
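                /* fallthru */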
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
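                /* fallthru */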
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

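/* Mask the chip's PCI interrupt via MISC_HOST_CTRL and write 1 to each
 * vector's interrupt mailbox; a nonzero mailbox value keeps that
 * vector's interrupts blocked.
 */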
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

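/* Clause 22 MII access: build a management frame in MAC_MI_COM and poll
 * MI_COM_BUSY until the MAC completes it.  Autopolling is paused around
 * the transaction so it cannot clobber the frame.
 */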
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

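/* Clause 45 access tunneled through clause 22: select the MMD device in
 * MII_TG3_MMD_CTRL, latch the register address, then switch to
 * no-increment data mode and move the value through MII_TG3_MMD_ADDRESS.
 */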
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

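/* DSP registers are reached through an address/data pair:
 * MII_TG3_DSP_ADDRESS selects the register and MII_TG3_DSP_RW_PORT
 * carries the data.
 */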
static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
        if (err)
                return err;

        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

        return err;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

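/* phylib mii_bus accessors: wrap tg3_readphy()/tg3_writephy() under
 * tp->lock and translate failures to -EIO.
 */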
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
        if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
                tg3_bmcr_reset(tp);

        i = mdiobus_register(tp->mdio_bus);
        if (i) {
                dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
                mdiobus_free(tp->mdio_bus);
                return i;
        }

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

        if (!phydev || !phydev->drv) {
                dev_warn(&tp->pdev->dev, "No PHY devices\n");
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
                return -ENODEV;
        }

        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM57780:
                phydev->interface = PHY_INTERFACE_MODE_GMII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                break;
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
                                     PHY_BRCM_RX_REFCLK_UNUSED |
                                     PHY_BRCM_DIS_TXCRXC_NOENRGY |
                                     PHY_BRCM_AUTO_PWRDWN_ENABLE;
                if (tg3_flag(tp, RGMII_INBAND_DISABLE))
                        phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
                /* fallthru */
        case PHY_ID_RTL8211C:
                phydev->interface = PHY_INTERFACE_MODE_RGMII;
                break;
        case PHY_ID_RTL8201E:
        case PHY_ID_BCMAC131:
                phydev->interface = PHY_INTERFACE_MODE_MII;
                phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
                tp->phy_flags |= TG3_PHYFLG_IS_FET;
                break;
        }

        tg3_flag_set(tp, MDIOBUS_INITED);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);

        return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
        if (tg3_flag(tp, MDIOBUS_INITED)) {
                tg3_flag_clear(tp, MDIOBUS_INITED);
                mdiobus_unregister(tp->mdio_bus);
                mdiobus_free(tp->mdio_bus);
        }
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
        u32 val;

        val = tr32(GRC_RX_CPU_EVENT);
        val |= GRC_RX_CPU_DRIVER_EVENT;
        tw32_f(GRC_RX_CPU_EVENT, val);

        tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
        int i;
        unsigned int delay_cnt;
        long time_remain;

        /* If enough time has passed, no wait is necessary. */
        time_remain = (long)(tp->last_event_jiffies + 1 +
                      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
                      (long)jiffies;
        if (time_remain < 0)
                return;

        /* Check if we can shorten the wait time. */
        delay_cnt = jiffies_to_usecs(time_remain);
        if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
                delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
        delay_cnt = (delay_cnt >> 3) + 1;

        for (i = 0; i < delay_cnt; i++) {
                if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
                        break;
                udelay(8);
        }
}

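/* Report link state to ASF firmware by copying the core MII registers
 * (BMCR/BMSR, advertisement/link partner, 1000BASE-T control/status and
 * PHY address) into the firmware command mailbox.
 */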
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
        u32 reg;
        u32 val;

        if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
                return;

        tg3_wait_for_event_ack(tp);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

        val = 0;
        if (!tg3_readphy(tp, MII_BMCR, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_BMSR, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

        val = 0;
        if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
                val = reg << 16;
        if (!tg3_readphy(tp, MII_LPA, &reg))
                val |= (reg & 0xffff);
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

        val = 0;
        if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
                if (!tg3_readphy(tp, MII_CTRL1000, &reg))
                        val = reg << 16;
                if (!tg3_readphy(tp, MII_STAT1000, &reg))
                        val |= (reg & 0xffff);
        }
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

        if (!tg3_readphy(tp, MII_PHYADDR, &reg))
                val = reg << 16;
        else
                val = 0;
        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

        tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
        if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
                /* Wait for RX cpu to ACK the previous event. */
                tg3_wait_for_event_ack(tp);

                tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

                tg3_generate_fw_event(tp);

                /* Wait for RX cpu to ACK this event. */
                tg3_wait_for_event_ack(tp);
        }
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
        tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
                      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_START);
                        break;

                case RESET_KIND_SHUTDOWN:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_UNLOAD);
                        break;

                case RESET_KIND_SUSPEND:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
                                      DRV_STATE_SUSPEND);
                        break;

                default:
                        break;
                }
        }

        if (kind == RESET_KIND_INIT ||
            kind == RESET_KIND_SUSPEND)
                tg3_ape_driver_state_change(tp, kind);
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
        if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
                switch (kind) {
                case RESET_KIND_INIT:
                        tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1575                                       DRV_STATE_START_DONE);
1576                         break;
1577
1578                 case RESET_KIND_SHUTDOWN:
1579                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1580                                       DRV_STATE_UNLOAD_DONE);
1581                         break;
1582
1583                 default:
1584                         break;
1585                 }
1586         }
1587
1588         if (kind == RESET_KIND_SHUTDOWN)
1589                 tg3_ape_driver_state_change(tp, kind);
1590 }
1591
1592 /* tp->lock is held. */
1593 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1594 {
1595         if (tg3_flag(tp, ENABLE_ASF)) {
1596                 switch (kind) {
1597                 case RESET_KIND_INIT:
1598                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1599                                       DRV_STATE_START);
1600                         break;
1601
1602                 case RESET_KIND_SHUTDOWN:
1603                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1604                                       DRV_STATE_UNLOAD);
1605                         break;
1606
1607                 case RESET_KIND_SUSPEND:
1608                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1609                                       DRV_STATE_SUSPEND);
1610                         break;
1611
1612                 default:
1613                         break;
1614                 }
1615         }
1616 }
1617
1618 static int tg3_poll_fw(struct tg3 *tp)
1619 {
1620         int i;
1621         u32 val;
1622
1623         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1624                 /* Wait up to 20ms for init done. */
1625                 for (i = 0; i < 200; i++) {
1626                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1627                                 return 0;
1628                         udelay(100);
1629                 }
1630                 return -ENODEV;
1631         }
1632
1633         /* Wait for firmware initialization to complete. */
1634         for (i = 0; i < 100000; i++) {
1635                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1636                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1637                         break;
1638                 udelay(10);
1639         }
1640
1641         /* Chip might not be fitted with firmware.  Some Sun onboard
1642          * parts are configured like that.  So don't signal the timeout
1643          * of the above loop as an error, but do report the lack of
1644          * running firmware once.
1645          */
1646         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1647                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1648
1649                 netdev_info(tp->dev, "No firmware running\n");
1650         }
1651
1652         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1653                 /* The 57765 A0 needs a little more
1654                  * time to do some important work.
1655                  */
1656                 mdelay(10);
1657         }
1658
1659         return 0;
1660 }
1661
1662 static void tg3_link_report(struct tg3 *tp)
1663 {
1664         if (!netif_carrier_ok(tp->dev)) {
1665                 netif_info(tp, link, tp->dev, "Link is down\n");
1666                 tg3_ump_link_report(tp);
1667         } else if (netif_msg_link(tp)) {
1668                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1669                             (tp->link_config.active_speed == SPEED_1000 ?
1670                              1000 :
1671                              (tp->link_config.active_speed == SPEED_100 ?
1672                               100 : 10)),
1673                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1674                              "full" : "half"));
1675
1676                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1677                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1678                             "on" : "off",
1679                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1680                             "on" : "off");
1681
1682                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1683                         netdev_info(tp->dev, "EEE is %s\n",
1684                                     tp->setlpicnt ? "enabled" : "disabled");
1685
1686                 tg3_ump_link_report(tp);
1687         }
1688 }
1689
1690 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1691 {
1692         u16 miireg;
1693
1694         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1695                 miireg = ADVERTISE_PAUSE_CAP;
1696         else if (flow_ctrl & FLOW_CTRL_TX)
1697                 miireg = ADVERTISE_PAUSE_ASYM;
1698         else if (flow_ctrl & FLOW_CTRL_RX)
1699                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1700         else
1701                 miireg = 0;
1702
1703         return miireg;
1704 }
1705
1706 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1707 {
1708         u16 miireg;
1709
1710         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1711                 miireg = ADVERTISE_1000XPAUSE;
1712         else if (flow_ctrl & FLOW_CTRL_TX)
1713                 miireg = ADVERTISE_1000XPSE_ASYM;
1714         else if (flow_ctrl & FLOW_CTRL_RX)
1715                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1716         else
1717                 miireg = 0;
1718
1719         return miireg;
1720 }
1721
1722 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1723 {
1724         u8 cap = 0;
1725
1726         if (lcladv & ADVERTISE_1000XPAUSE) {
1727                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1728                         if (rmtadv & LPA_1000XPAUSE)
1729                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1730                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1731                                 cap = FLOW_CTRL_RX;
1732                 } else {
1733                         if (rmtadv & LPA_1000XPAUSE)
1734                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1735                 }
1736         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1737                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1738                         cap = FLOW_CTRL_TX;
1739         }
1740
1741         return cap;
1742 }
1743
1744 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1745 {
1746         u8 autoneg;
1747         u8 flowctrl = 0;
1748         u32 old_rx_mode = tp->rx_mode;
1749         u32 old_tx_mode = tp->tx_mode;
1750
1751         if (tg3_flag(tp, USE_PHYLIB))
1752                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1753         else
1754                 autoneg = tp->link_config.autoneg;
1755
1756         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1757                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1758                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1759                 else
1760                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1761         } else
1762                 flowctrl = tp->link_config.flowctrl;
1763
1764         tp->link_config.active_flowctrl = flowctrl;
1765
1766         if (flowctrl & FLOW_CTRL_RX)
1767                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1768         else
1769                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1770
1771         if (old_rx_mode != tp->rx_mode)
1772                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1773
1774         if (flowctrl & FLOW_CTRL_TX)
1775                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1776         else
1777                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1778
1779         if (old_tx_mode != tp->tx_mode)
1780                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1781 }
1782
1783 static void tg3_adjust_link(struct net_device *dev)
1784 {
1785         u8 oldflowctrl, linkmesg = 0;
1786         u32 mac_mode, lcl_adv, rmt_adv;
1787         struct tg3 *tp = netdev_priv(dev);
1788         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1789
1790         spin_lock_bh(&tp->lock);
1791
1792         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1793                                     MAC_MODE_HALF_DUPLEX);
1794
1795         oldflowctrl = tp->link_config.active_flowctrl;
1796
1797         if (phydev->link) {
1798                 lcl_adv = 0;
1799                 rmt_adv = 0;
1800
1801                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1802                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1803                 else if (phydev->speed == SPEED_1000 ||
1804                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1805                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1806                 else
1807                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1808
1809                 if (phydev->duplex == DUPLEX_HALF)
1810                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1811                 else {
1812                         lcl_adv = tg3_advert_flowctrl_1000T(
1813                                   tp->link_config.flowctrl);
1814
1815                         if (phydev->pause)
1816                                 rmt_adv = LPA_PAUSE_CAP;
1817                         if (phydev->asym_pause)
1818                                 rmt_adv |= LPA_PAUSE_ASYM;
1819                 }
1820
1821                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1822         } else
1823                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1824
1825         if (mac_mode != tp->mac_mode) {
1826                 tp->mac_mode = mac_mode;
1827                 tw32_f(MAC_MODE, tp->mac_mode);
1828                 udelay(40);
1829         }
1830
1831         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1832                 if (phydev->speed == SPEED_10)
1833                         tw32(MAC_MI_STAT,
1834                              MAC_MI_STAT_10MBPS_MODE |
1835                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1836                 else
1837                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1838         }
1839
1840         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1841                 tw32(MAC_TX_LENGTHS,
1842                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1843                       (6 << TX_LENGTHS_IPG_SHIFT) |
1844                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1845         else
1846                 tw32(MAC_TX_LENGTHS,
1847                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1848                       (6 << TX_LENGTHS_IPG_SHIFT) |
1849                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1850
1851         if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1852             (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1853             phydev->speed != tp->link_config.active_speed ||
1854             phydev->duplex != tp->link_config.active_duplex ||
1855             oldflowctrl != tp->link_config.active_flowctrl)
1856                 linkmesg = 1;
1857
1858         tp->link_config.active_speed = phydev->speed;
1859         tp->link_config.active_duplex = phydev->duplex;
1860
1861         spin_unlock_bh(&tp->lock);
1862
1863         if (linkmesg)
1864                 tg3_link_report(tp);
1865 }
1866
1867 static int tg3_phy_init(struct tg3 *tp)
1868 {
1869         struct phy_device *phydev;
1870
1871         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1872                 return 0;
1873
1874         /* Bring the PHY back to a known state. */
1875         tg3_bmcr_reset(tp);
1876
1877         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1878
1879         /* Attach the MAC to the PHY. */
1880         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1881                              phydev->dev_flags, phydev->interface);
1882         if (IS_ERR(phydev)) {
1883                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1884                 return PTR_ERR(phydev);
1885         }
1886
1887         /* Mask with MAC supported features. */
1888         switch (phydev->interface) {
1889         case PHY_INTERFACE_MODE_GMII:
1890         case PHY_INTERFACE_MODE_RGMII:
1891                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1892                         phydev->supported &= (PHY_GBIT_FEATURES |
1893                                               SUPPORTED_Pause |
1894                                               SUPPORTED_Asym_Pause);
1895                         break;
1896                 }
1897                 /* fallthru */
1898         case PHY_INTERFACE_MODE_MII:
1899                 phydev->supported &= (PHY_BASIC_FEATURES |
1900                                       SUPPORTED_Pause |
1901                                       SUPPORTED_Asym_Pause);
1902                 break;
1903         default:
1904                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1905                 return -EINVAL;
1906         }
1907
1908         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1909
1910         phydev->advertising = phydev->supported;
1911
1912         return 0;
1913 }
1914
1915 static void tg3_phy_start(struct tg3 *tp)
1916 {
1917         struct phy_device *phydev;
1918
1919         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1920                 return;
1921
1922         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1923
1924         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1925                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1926                 phydev->speed = tp->link_config.orig_speed;
1927                 phydev->duplex = tp->link_config.orig_duplex;
1928                 phydev->autoneg = tp->link_config.orig_autoneg;
1929                 phydev->advertising = tp->link_config.orig_advertising;
1930         }
1931
1932         phy_start(phydev);
1933
1934         phy_start_aneg(phydev);
1935 }
1936
1937 static void tg3_phy_stop(struct tg3 *tp)
1938 {
1939         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1940                 return;
1941
1942         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1943 }
1944
1945 static void tg3_phy_fini(struct tg3 *tp)
1946 {
1947         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1948                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1949                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1950         }
1951 }
1952
1953 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1954 {
1955         int err;
1956         u32 val;
1957
1958         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1959                 return 0;
1960
1961         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1962                 /* Cannot do read-modify-write on 5401 */
1963                 err = tg3_phy_auxctl_write(tp,
1964                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1965                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1966                                            0x4c20);
1967                 goto done;
1968         }
1969
1970         err = tg3_phy_auxctl_read(tp,
1971                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1972         if (err)
1973                 return err;
1974
1975         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1976         err = tg3_phy_auxctl_write(tp,
1977                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1978
1979 done:
1980         return err;
1981 }
1982
1983 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1984 {
1985         u32 phytest;
1986
1987         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1988                 u32 phy;
1989
1990                 tg3_writephy(tp, MII_TG3_FET_TEST,
1991                              phytest | MII_TG3_FET_SHADOW_EN);
1992                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1993                         if (enable)
1994                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1995                         else
1996                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1997                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1998                 }
1999                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2000         }
2001 }
2002
2003 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2004 {
2005         u32 reg;
2006
2007         if (!tg3_flag(tp, 5705_PLUS) ||
2008             (tg3_flag(tp, 5717_PLUS) &&
2009              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2010                 return;
2011
2012         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2013                 tg3_phy_fet_toggle_apd(tp, enable);
2014                 return;
2015         }
2016
2017         reg = MII_TG3_MISC_SHDW_WREN |
2018               MII_TG3_MISC_SHDW_SCR5_SEL |
2019               MII_TG3_MISC_SHDW_SCR5_LPED |
2020               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2021               MII_TG3_MISC_SHDW_SCR5_SDTL |
2022               MII_TG3_MISC_SHDW_SCR5_C125OE;
2023         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2024                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2025
2026         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2027
2028
2029         reg = MII_TG3_MISC_SHDW_WREN |
2030               MII_TG3_MISC_SHDW_APD_SEL |
2031               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2032         if (enable)
2033                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2034
2035         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2036 }
2037
2038 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2039 {
2040         u32 phy;
2041
2042         if (!tg3_flag(tp, 5705_PLUS) ||
2043             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2044                 return;
2045
2046         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2047                 u32 ephy;
2048
2049                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2050                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2051
2052                         tg3_writephy(tp, MII_TG3_FET_TEST,
2053                                      ephy | MII_TG3_FET_SHADOW_EN);
2054                         if (!tg3_readphy(tp, reg, &phy)) {
2055                                 if (enable)
2056                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2057                                 else
2058                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2059                                 tg3_writephy(tp, reg, phy);
2060                         }
2061                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2062                 }
2063         } else {
2064                 int ret;
2065
2066                 ret = tg3_phy_auxctl_read(tp,
2067                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2068                 if (!ret) {
2069                         if (enable)
2070                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2071                         else
2072                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2073                         tg3_phy_auxctl_write(tp,
2074                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2075                 }
2076         }
2077 }
2078
2079 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2080 {
2081         int ret;
2082         u32 val;
2083
2084         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2085                 return;
2086
2087         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2088         if (!ret)
2089                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2090                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2091 }
2092
2093 static void tg3_phy_apply_otp(struct tg3 *tp)
2094 {
2095         u32 otp, phy;
2096
2097         if (!tp->phy_otp)
2098                 return;
2099
2100         otp = tp->phy_otp;
2101
2102         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2103                 return;
2104
2105         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2106         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2107         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2108
2109         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2110               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2111         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2112
2113         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2114         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2115         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2116
2117         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2118         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2119
2120         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2121         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2122
2123         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2124               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2125         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2126
2127         tg3_phy_toggle_auxctl_smdsp(tp, false);
2128 }
2129
2130 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2131 {
2132         u32 val;
2133
2134         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2135                 return;
2136
2137         tp->setlpicnt = 0;
2138
2139         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2140             current_link_up == 1 &&
2141             tp->link_config.active_duplex == DUPLEX_FULL &&
2142             (tp->link_config.active_speed == SPEED_100 ||
2143              tp->link_config.active_speed == SPEED_1000)) {
2144                 u32 eeectl;
2145
2146                 if (tp->link_config.active_speed == SPEED_1000)
2147                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2148                 else
2149                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2150
2151                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2152
2153                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2154                                   TG3_CL45_D7_EEERES_STAT, &val);
2155
2156                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2157                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2158                         tp->setlpicnt = 2;
2159         }
2160
2161         if (!tp->setlpicnt) {
2162                 if (current_link_up == 1 &&
2163                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2164                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2165                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2166                 }
2167
2168                 val = tr32(TG3_CPMU_EEE_MODE);
2169                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2170         }
2171 }
2172
2173 static void tg3_phy_eee_enable(struct tg3 *tp)
2174 {
2175         u32 val;
2176
2177         if (tp->link_config.active_speed == SPEED_1000 &&
2178             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2179              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2180              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
2181             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2182                 val = MII_TG3_DSP_TAP26_ALNOKO |
2183                       MII_TG3_DSP_TAP26_RMRXSTO;
2184                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2185                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2186         }
2187
2188         val = tr32(TG3_CPMU_EEE_MODE);
2189         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2190 }
2191
2192 static int tg3_wait_macro_done(struct tg3 *tp)
2193 {
2194         int limit = 100;
2195
2196         while (limit--) {
2197                 u32 tmp32;
2198
2199                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2200                         if ((tmp32 & 0x1000) == 0)
2201                                 break;
2202                 }
2203         }
2204         if (limit < 0)
2205                 return -EBUSY;
2206
2207         return 0;
2208 }
2209
2210 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2211 {
2212         static const u32 test_pat[4][6] = {
2213         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2214         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2215         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2216         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2217         };
2218         int chan;
2219
2220         for (chan = 0; chan < 4; chan++) {
2221                 int i;
2222
2223                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2224                              (chan * 0x2000) | 0x0200);
2225                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2226
2227                 for (i = 0; i < 6; i++)
2228                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2229                                      test_pat[chan][i]);
2230
2231                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2232                 if (tg3_wait_macro_done(tp)) {
2233                         *resetp = 1;
2234                         return -EBUSY;
2235                 }
2236
2237                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2238                              (chan * 0x2000) | 0x0200);
2239                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2240                 if (tg3_wait_macro_done(tp)) {
2241                         *resetp = 1;
2242                         return -EBUSY;
2243                 }
2244
2245                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2246                 if (tg3_wait_macro_done(tp)) {
2247                         *resetp = 1;
2248                         return -EBUSY;
2249                 }
2250
2251                 for (i = 0; i < 6; i += 2) {
2252                         u32 low, high;
2253
2254                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2255                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2256                             tg3_wait_macro_done(tp)) {
2257                                 *resetp = 1;
2258                                 return -EBUSY;
2259                         }
2260                         low &= 0x7fff;
2261                         high &= 0x000f;
2262                         if (low != test_pat[chan][i] ||
2263                             high != test_pat[chan][i+1]) {
2264                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2265                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2266                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2267
2268                                 return -EBUSY;
2269                         }
2270                 }
2271         }
2272
2273         return 0;
2274 }
2275
2276 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2277 {
2278         int chan;
2279
2280         for (chan = 0; chan < 4; chan++) {
2281                 int i;
2282
2283                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2284                              (chan * 0x2000) | 0x0200);
2285                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2286                 for (i = 0; i < 6; i++)
2287                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2288                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2289                 if (tg3_wait_macro_done(tp))
2290                         return -EBUSY;
2291         }
2292
2293         return 0;
2294 }
2295
2296 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2297 {
2298         u32 reg32, phy9_orig;
2299         int retries, do_phy_reset, err;
2300
2301         retries = 10;
2302         do_phy_reset = 1;
2303         do {
2304                 if (do_phy_reset) {
2305                         err = tg3_bmcr_reset(tp);
2306                         if (err)
2307                                 return err;
2308                         do_phy_reset = 0;
2309                 }
2310
2311                 /* Disable transmitter and interrupt.  */
2312                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2313                         continue;
2314
2315                 reg32 |= 0x3000;
2316                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2317
2318                 /* Set full-duplex, 1000 mbps.  */
2319                 tg3_writephy(tp, MII_BMCR,
2320                              BMCR_FULLDPLX | BMCR_SPEED1000);
2321
2322                 /* Set to master mode.  */
2323                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2324                         continue;
2325
2326                 tg3_writephy(tp, MII_CTRL1000,
2327                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2328
2329                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2330                 if (err)
2331                         return err;
2332
2333                 /* Block the PHY control access.  */
2334                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2335
2336                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2337                 if (!err)
2338                         break;
2339         } while (--retries);
2340
2341         err = tg3_phy_reset_chanpat(tp);
2342         if (err)
2343                 return err;
2344
2345         tg3_phydsp_write(tp, 0x8005, 0x0000);
2346
2347         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2348         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2349
2350         tg3_phy_toggle_auxctl_smdsp(tp, false);
2351
2352         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2353
2354         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2355                 reg32 &= ~0x3000;
2356                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2357         } else if (!err)
2358                 err = -EBUSY;
2359
2360         return err;
2361 }
2362
2363 /* This will reset the tigon3 PHY if there is no valid
2364  * link unless the FORCE argument is non-zero.
2365  */
2366 static int tg3_phy_reset(struct tg3 *tp)
2367 {
2368         u32 val, cpmuctrl;
2369         int err;
2370
2371         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2372                 val = tr32(GRC_MISC_CFG);
2373                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2374                 udelay(40);
2375         }
2376         err  = tg3_readphy(tp, MII_BMSR, &val);
2377         err |= tg3_readphy(tp, MII_BMSR, &val);
2378         if (err != 0)
2379                 return -EBUSY;
2380
2381         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2382                 netif_carrier_off(tp->dev);
2383                 tg3_link_report(tp);
2384         }
2385
2386         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2387             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2388             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2389                 err = tg3_phy_reset_5703_4_5(tp);
2390                 if (err)
2391                         return err;
2392                 goto out;
2393         }
2394
2395         cpmuctrl = 0;
2396         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2397             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2398                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2399                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2400                         tw32(TG3_CPMU_CTRL,
2401                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2402         }
2403
2404         err = tg3_bmcr_reset(tp);
2405         if (err)
2406                 return err;
2407
2408         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2409                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2410                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2411
2412                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2413         }
2414
2415         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2416             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2417                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2418                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2419                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2420                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2421                         udelay(40);
2422                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2423                 }
2424         }
2425
2426         if (tg3_flag(tp, 5717_PLUS) &&
2427             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2428                 return 0;
2429
2430         tg3_phy_apply_otp(tp);
2431
2432         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2433                 tg3_phy_toggle_apd(tp, true);
2434         else
2435                 tg3_phy_toggle_apd(tp, false);
2436
2437 out:
2438         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2439             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2440                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2441                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2442                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2443         }
2444
2445         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2446                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2447                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2448         }
2449
2450         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2451                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2452                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2453                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2454                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2455                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2456                 }
2457         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2458                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2459                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2460                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2461                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2462                                 tg3_writephy(tp, MII_TG3_TEST1,
2463                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2464                         } else
2465                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2466
2467                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2468                 }
2469         }
2470
2471         /* Set Extended packet length bit (bit 14) on all chips that */
2472         /* support jumbo frames */
2473         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2474                 /* Cannot do read-modify-write on 5401 */
2475                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2476         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2477                 /* Set bit 14 with read-modify-write to preserve other bits */
2478                 err = tg3_phy_auxctl_read(tp,
2479                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2480                 if (!err)
2481                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2482                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2483         }
2484
2485         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2486          * jumbo frames transmission.
2487          */
2488         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2489                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2490                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2491                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2492         }
2493
2494         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2495                 /* adjust output voltage */
2496                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2497         }
2498
2499         tg3_phy_toggle_automdix(tp, 1);
2500         tg3_phy_set_wirespeed(tp);
2501         return 0;
2502 }
2503
2504 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2505 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2506 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2507                                           TG3_GPIO_MSG_NEED_VAUX)
2508 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2509         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2510          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2511          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2512          (TG3_GPIO_MSG_DRVR_PRES << 12))
2513
2514 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2515         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2516          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2517          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2518          (TG3_GPIO_MSG_NEED_VAUX << 12))
2519
2520 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2521 {
2522         u32 status, shift;
2523
2524         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2525             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2526                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2527         else
2528                 status = tr32(TG3_CPMU_DRV_STATUS);
2529
2530         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2531         status &= ~(TG3_GPIO_MSG_MASK << shift);
2532         status |= (newstat << shift);
2533
2534         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2535             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2536                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2537         else
2538                 tw32(TG3_CPMU_DRV_STATUS, status);
2539
2540         return status >> TG3_APE_GPIO_MSG_SHIFT;
2541 }
2542
2543 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2544 {
2545         if (!tg3_flag(tp, IS_NIC))
2546                 return 0;
2547
2548         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2549             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2550             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2551                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2552                         return -EIO;
2553
2554                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2555
2556                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2557                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2558
2559                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2560         } else {
2561                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2562                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2563         }
2564
2565         return 0;
2566 }
2567
2568 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2569 {
2570         u32 grc_local_ctrl;
2571
2572         if (!tg3_flag(tp, IS_NIC) ||
2573             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2574             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2575                 return;
2576
2577         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2578
2579         tw32_wait_f(GRC_LOCAL_CTRL,
2580                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2581                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2582
2583         tw32_wait_f(GRC_LOCAL_CTRL,
2584                     grc_local_ctrl,
2585                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2586
2587         tw32_wait_f(GRC_LOCAL_CTRL,
2588                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2589                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2590 }
2591
2592 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2593 {
2594         if (!tg3_flag(tp, IS_NIC))
2595                 return;
2596
2597         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2598             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2599                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2600                             (GRC_LCLCTRL_GPIO_OE0 |
2601                              GRC_LCLCTRL_GPIO_OE1 |
2602                              GRC_LCLCTRL_GPIO_OE2 |
2603                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2604                              GRC_LCLCTRL_GPIO_OUTPUT1),
2605                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2606         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2607                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2608                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2609                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2610                                      GRC_LCLCTRL_GPIO_OE1 |
2611                                      GRC_LCLCTRL_GPIO_OE2 |
2612                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2613                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2614                                      tp->grc_local_ctrl;
2615                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2616                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2617
2618                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2619                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2620                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2621
2622                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2623                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2624                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2625         } else {
2626                 u32 no_gpio2;
2627                 u32 grc_local_ctrl = 0;
2628
2629                 /* Workaround to prevent overdrawing Amps. */
2630                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2631                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2632                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2633                                     grc_local_ctrl,
2634                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2635                 }
2636
2637                 /* On 5753 and variants, GPIO2 cannot be used. */
2638                 no_gpio2 = tp->nic_sram_data_cfg &
2639                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2640
2641                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2642                                   GRC_LCLCTRL_GPIO_OE1 |
2643                                   GRC_LCLCTRL_GPIO_OE2 |
2644                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2645                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2646                 if (no_gpio2) {
2647                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2648                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2649                 }
2650                 tw32_wait_f(GRC_LOCAL_CTRL,
2651                             tp->grc_local_ctrl | grc_local_ctrl,
2652                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2653
2654                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2655
2656                 tw32_wait_f(GRC_LOCAL_CTRL,
2657                             tp->grc_local_ctrl | grc_local_ctrl,
2658                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2659
2660                 if (!no_gpio2) {
2661                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2662                         tw32_wait_f(GRC_LOCAL_CTRL,
2663                                     tp->grc_local_ctrl | grc_local_ctrl,
2664                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2665                 }
2666         }
2667 }
2668
2669 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2670 {
2671         u32 msg = 0;
2672
2673         /* Serialize power state transitions */
2674         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2675                 return;
2676
2677         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2678                 msg = TG3_GPIO_MSG_NEED_VAUX;
2679
2680         msg = tg3_set_function_status(tp, msg);
2681
2682         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2683                 goto done;
2684
2685         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2686                 tg3_pwrsrc_switch_to_vaux(tp);
2687         else
2688                 tg3_pwrsrc_die_with_vmain(tp);
2689
2690 done:
2691         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2692 }
2693
2694 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2695 {
2696         bool need_vaux = false;
2697
2698         /* The GPIOs do something completely different on 57765. */
2699         if (!tg3_flag(tp, IS_NIC) ||
2700             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2701                 return;
2702
2703         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2704             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2705             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2706                 tg3_frob_aux_power_5717(tp, include_wol ?
2707                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2708                 return;
2709         }
2710
2711         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2712                 struct net_device *dev_peer;
2713
2714                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2715
2716                 /* remove_one() may have been run on the peer. */
2717                 if (dev_peer) {
2718                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2719
2720                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2721                                 return;
2722
2723                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2724                             tg3_flag(tp_peer, ENABLE_ASF))
2725                                 need_vaux = true;
2726                 }
2727         }
2728
2729         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2730             tg3_flag(tp, ENABLE_ASF))
2731                 need_vaux = true;
2732
2733         if (need_vaux)
2734                 tg3_pwrsrc_switch_to_vaux(tp);
2735         else
2736                 tg3_pwrsrc_die_with_vmain(tp);
2737 }
2738
2739 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2740 {
2741         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2742                 return 1;
2743         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2744                 if (speed != SPEED_10)
2745                         return 1;
2746         } else if (speed == SPEED_10)
2747                 return 1;
2748
2749         return 0;
2750 }
2751
2752 static int tg3_setup_phy(struct tg3 *, int);
2753 static int tg3_halt_cpu(struct tg3 *, u32);
2754
2755 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2756 {
2757         u32 val;
2758
2759         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2760                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2761                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2762                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2763
2764                         sg_dig_ctrl |=
2765                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2766                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2767                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2768                 }
2769                 return;
2770         }
2771
2772         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2773                 tg3_bmcr_reset(tp);
2774                 val = tr32(GRC_MISC_CFG);
2775                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2776                 udelay(40);
2777                 return;
2778         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2779                 u32 phytest;
2780                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2781                         u32 phy;
2782
2783                         tg3_writephy(tp, MII_ADVERTISE, 0);
2784                         tg3_writephy(tp, MII_BMCR,
2785                                      BMCR_ANENABLE | BMCR_ANRESTART);
2786
2787                         tg3_writephy(tp, MII_TG3_FET_TEST,
2788                                      phytest | MII_TG3_FET_SHADOW_EN);
2789                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2790                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2791                                 tg3_writephy(tp,
2792                                              MII_TG3_FET_SHDW_AUXMODE4,
2793                                              phy);
2794                         }
2795                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2796                 }
2797                 return;
2798         } else if (do_low_power) {
2799                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2800                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2801
2802                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2803                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2804                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2805                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2806         }
2807
2808         /* The PHY should not be powered down on some chips because
2809          * of bugs.
2810          */
2811         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2812             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2813             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2814              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2815             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2816              !tp->pci_fn))
2817                 return;
2818
2819         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2820             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2821                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2822                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2823                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2824                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2825         }
2826
2827         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2828 }
2829
2830 /* tp->lock is held. */
2831 static int tg3_nvram_lock(struct tg3 *tp)
2832 {
2833         if (tg3_flag(tp, NVRAM)) {
2834                 int i;
2835
2836                 if (tp->nvram_lock_cnt == 0) {
2837                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2838                         for (i = 0; i < 8000; i++) {
2839                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2840                                         break;
2841                                 udelay(20);
2842                         }
2843                         if (i == 8000) {
2844                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2845                                 return -ENODEV;
2846                         }
2847                 }
2848                 tp->nvram_lock_cnt++;
2849         }
2850         return 0;
2851 }
2852
2853 /* tp->lock is held. */
2854 static void tg3_nvram_unlock(struct tg3 *tp)
2855 {
2856         if (tg3_flag(tp, NVRAM)) {
2857                 if (tp->nvram_lock_cnt > 0)
2858                         tp->nvram_lock_cnt--;
2859                 if (tp->nvram_lock_cnt == 0)
2860                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2861         }
2862 }
2863
2864 /* tp->lock is held. */
2865 static void tg3_enable_nvram_access(struct tg3 *tp)
2866 {
2867         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2868                 u32 nvaccess = tr32(NVRAM_ACCESS);
2869
2870                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2871         }
2872 }
2873
2874 /* tp->lock is held. */
2875 static void tg3_disable_nvram_access(struct tg3 *tp)
2876 {
2877         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2878                 u32 nvaccess = tr32(NVRAM_ACCESS);
2879
2880                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2881         }
2882 }
2883
2884 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2885                                         u32 offset, u32 *val)
2886 {
2887         u32 tmp;
2888         int i;
2889
2890         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2891                 return -EINVAL;
2892
2893         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2894                                         EEPROM_ADDR_DEVID_MASK |
2895                                         EEPROM_ADDR_READ);
2896         tw32(GRC_EEPROM_ADDR,
2897              tmp |
2898              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2899              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2900               EEPROM_ADDR_ADDR_MASK) |
2901              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2902
2903         for (i = 0; i < 1000; i++) {
2904                 tmp = tr32(GRC_EEPROM_ADDR);
2905
2906                 if (tmp & EEPROM_ADDR_COMPLETE)
2907                         break;
2908                 msleep(1);
2909         }
2910         if (!(tmp & EEPROM_ADDR_COMPLETE))
2911                 return -EBUSY;
2912
2913         tmp = tr32(GRC_EEPROM_DATA);
2914
2915         /*
2916          * The data always comes back in the opposite of the native
2917          * endian format.  Perform a blind byteswap to compensate.
2918          */
2919         *val = swab32(tmp);
2920
2921         return 0;
2922 }
2923
2924 #define NVRAM_CMD_TIMEOUT 10000
2925
2926 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2927 {
2928         int i;
2929
2930         tw32(NVRAM_CMD, nvram_cmd);
2931         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2932                 udelay(10);
2933                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2934                         udelay(10);
2935                         break;
2936                 }
2937         }
2938
2939         if (i == NVRAM_CMD_TIMEOUT)
2940                 return -EBUSY;
2941
2942         return 0;
2943 }
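
/* The poll above runs NVRAM_CMD_TIMEOUT iterations of udelay(10), so a
 * command is given roughly 100 ms to raise NVRAM_CMD_DONE before the
 * function gives up with -EBUSY.
 */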
2944
2945 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2946 {
2947         if (tg3_flag(tp, NVRAM) &&
2948             tg3_flag(tp, NVRAM_BUFFERED) &&
2949             tg3_flag(tp, FLASH) &&
2950             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2951             (tp->nvram_jedecnum == JEDEC_ATMEL))
2952
2953                 addr = ((addr / tp->nvram_pagesize) <<
2954                         ATMEL_AT45DB0X1B_PAGE_POS) +
2955                        (addr % tp->nvram_pagesize);
2956
2957         return addr;
2958 }
2959
2960 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2961 {
2962         if (tg3_flag(tp, NVRAM) &&
2963             tg3_flag(tp, NVRAM_BUFFERED) &&
2964             tg3_flag(tp, FLASH) &&
2965             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2966             (tp->nvram_jedecnum == JEDEC_ATMEL))
2967
2968                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2969                         tp->nvram_pagesize) +
2970                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2971
2972         return addr;
2973 }
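
/* Worked example of the Atmel translation above, assuming the typical
 * 264-byte page of an AT45DB0X1B part (the actual geometry comes from
 * tp->nvram_pagesize): logical offset 533 falls in page 533 / 264 = 2
 * at byte 533 % 264 = 5, so tg3_nvram_phys_addr() returns
 * (2 << ATMEL_AT45DB0X1B_PAGE_POS) + 5, and tg3_nvram_logical_addr()
 * maps that value back to 533.
 */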
2974
2975 /* NOTE: Data read from NVRAM is byteswapped according to the
2976  * byteswapping settings used for all other register accesses.
2977  * tg3 devices are BE devices, so on a BE machine, the data
2978  * returned will be exactly as it is seen in NVRAM.  On a LE
2979  * machine, the 32-bit value will be byteswapped.
2980  */
2981 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2982 {
2983         int ret;
2984
2985         if (!tg3_flag(tp, NVRAM))
2986                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2987
2988         offset = tg3_nvram_phys_addr(tp, offset);
2989
2990         if (offset > NVRAM_ADDR_MSK)
2991                 return -EINVAL;
2992
2993         ret = tg3_nvram_lock(tp);
2994         if (ret)
2995                 return ret;
2996
2997         tg3_enable_nvram_access(tp);
2998
2999         tw32(NVRAM_ADDR, offset);
3000         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3001                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3002
3003         if (ret == 0)
3004                 *val = tr32(NVRAM_RDDATA);
3005
3006         tg3_disable_nvram_access(tp);
3007
3008         tg3_nvram_unlock(tp);
3009
3010         return ret;
3011 }
3012
3013 /* Ensures NVRAM data is in bytestream format. */
3014 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3015 {
3016         u32 v;
3017         int res = tg3_nvram_read(tp, offset, &v);
3018         if (!res)
3019                 *val = cpu_to_be32(v);
3020         return res;
3021 }
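
/* Byte-ordering contract illustrated: if NVRAM holds the bytes
 * 0x12 0x34 0x56 0x78, tg3_nvram_read_be32() stores cpu_to_be32() of
 * the value read, so the bytes of *val appear in that same order on
 * both LE and BE hosts -- the "bytestream" form that firmware and VPD
 * parsers expect.
 */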
3022
3023 #define RX_CPU_SCRATCH_BASE     0x30000
3024 #define RX_CPU_SCRATCH_SIZE     0x04000
3025 #define TX_CPU_SCRATCH_BASE     0x34000
3026 #define TX_CPU_SCRATCH_SIZE     0x04000
3027
3028 /* tp->lock is held. */
3029 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3030 {
3031         int i;
3032
3033         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3034
3035         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3036                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3037
3038                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3039                 return 0;
3040         }
3041         if (offset == RX_CPU_BASE) {
3042                 for (i = 0; i < 10000; i++) {
3043                         tw32(offset + CPU_STATE, 0xffffffff);
3044                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3045                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3046                                 break;
3047                 }
3048
3049                 tw32(offset + CPU_STATE, 0xffffffff);
3050                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3051                 udelay(10);
3052         } else {
3053                 for (i = 0; i < 10000; i++) {
3054                         tw32(offset + CPU_STATE, 0xffffffff);
3055                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3056                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3057                                 break;
3058                 }
3059         }
3060
3061         if (i >= 10000) {
3062                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3063                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3064                 return -ENODEV;
3065         }
3066
3067         /* Clear firmware's nvram arbitration. */
3068         if (tg3_flag(tp, NVRAM))
3069                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3070         return 0;
3071 }
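
/* Note on tg3_halt_cpu() above: 5906-class (VCPU) parts are halted
 * with a single GRC_VCPU_EXT_CTRL write, while the other chips retry
 * the CPU_STATE/CPU_MODE writes up to 10000 times until CPU_MODE_HALT
 * reads back as set.
 */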
3072
3073 struct fw_info {
3074         unsigned int fw_base;
3075         unsigned int fw_len;
3076         const __be32 *fw_data;
3077 };
3078
3079 /* tp->lock is held. */
3080 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3081                                  u32 cpu_scratch_base, int cpu_scratch_size,
3082                                  struct fw_info *info)
3083 {
3084         int err, lock_err, i;
3085         void (*write_op)(struct tg3 *, u32, u32);
3086
3087         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3088                 netdev_err(tp->dev,
3089                            "%s: Trying to load TX cpu firmware on a 5705-class chip\n",
3090                            __func__);
3091                 return -EINVAL;
3092         }
3093
3094         if (tg3_flag(tp, 5705_PLUS))
3095                 write_op = tg3_write_mem;
3096         else
3097                 write_op = tg3_write_indirect_reg32;
3098
3099         /* It is possible that bootcode is still loading at this point.
3100          * Acquire the nvram lock before halting the cpu.
3101          */
3102         lock_err = tg3_nvram_lock(tp);
3103         err = tg3_halt_cpu(tp, cpu_base);
3104         if (!lock_err)
3105                 tg3_nvram_unlock(tp);
3106         if (err)
3107                 goto out;
3108
3109         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3110                 write_op(tp, cpu_scratch_base + i, 0);
3111         tw32(cpu_base + CPU_STATE, 0xffffffff);
3112         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3113         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3114                 write_op(tp, (cpu_scratch_base +
3115                               (info->fw_base & 0xffff) +
3116                               (i * sizeof(u32))),
3117                               be32_to_cpu(info->fw_data[i]));
3118
3119         err = 0;
3120
3121 out:
3122         return err;
3123 }
3124
3125 /* tp->lock is held. */
3126 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3127 {
3128         struct fw_info info;
3129         const __be32 *fw_data;
3130         int err, i;
3131
3132         fw_data = (void *)tp->fw->data;
3133
3134         /* Firmware blob starts with version numbers, followed by
3135          * start address and length.  We set the complete length:
3136          * length = end_address_of_bss - start_address_of_text.
3137          * The remainder is the blob, loaded contiguously from the
3138          * start address. */
3139
3140         info.fw_base = be32_to_cpu(fw_data[1]);
3141         info.fw_len = tp->fw->size - 12;
3142         info.fw_data = &fw_data[3];
3143
3144         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3145                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3146                                     &info);
3147         if (err)
3148                 return err;
3149
3150         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3151                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3152                                     &info);
3153         if (err)
3154                 return err;
3155
3156         /* Now start up only the RX cpu. */
3157         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3158         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3159
3160         for (i = 0; i < 5; i++) {
3161                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3162                         break;
3163                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3164                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3165                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3166                 udelay(1000);
3167         }
3168         if (i >= 5) {
3169                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3170                            "should be %08x\n", __func__,
3171                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3172                 return -ENODEV;
3173         }
3174         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3175         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3176
3177         return 0;
3178 }
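
/* Header layout implied by the parsing above (three big-endian words
 * before the image): fw_data[0] is the firmware version, fw_data[1]
 * the load/start address and fw_data[2] the declared length; the
 * payload begins at fw_data[3], hence the 12 bytes subtracted from
 * tp->fw->size.  Note the driver derives fw_len from the blob size
 * rather than trusting the header's length word.
 */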
3179
3180 /* tp->lock is held. */
3181 static int tg3_load_tso_firmware(struct tg3 *tp)
3182 {
3183         struct fw_info info;
3184         const __be32 *fw_data;
3185         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3186         int err, i;
3187
3188         if (tg3_flag(tp, HW_TSO_1) ||
3189             tg3_flag(tp, HW_TSO_2) ||
3190             tg3_flag(tp, HW_TSO_3))
3191                 return 0;
3192
3193         fw_data = (void *)tp->fw->data;
3194
3195         /* Firmware blob starts with version numbers, followed by
3196          * start address and length.  We set the complete length:
3197          * length = end_address_of_bss - start_address_of_text.
3198          * The remainder is the blob, loaded contiguously from the
3199          * start address. */
3200
3201         info.fw_base = be32_to_cpu(fw_data[1]);
3202         cpu_scratch_size = tp->fw_len;
3203         info.fw_len = tp->fw->size - 12;
3204         info.fw_data = &fw_data[3];
3205
3206         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3207                 cpu_base = RX_CPU_BASE;
3208                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3209         } else {
3210                 cpu_base = TX_CPU_BASE;
3211                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3212                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3213         }
3214
3215         err = tg3_load_firmware_cpu(tp, cpu_base,
3216                                     cpu_scratch_base, cpu_scratch_size,
3217                                     &info);
3218         if (err)
3219                 return err;
3220
3221         /* Now start up the cpu. */
3222         tw32(cpu_base + CPU_STATE, 0xffffffff);
3223         tw32_f(cpu_base + CPU_PC, info.fw_base);
3224
3225         for (i = 0; i < 5; i++) {
3226                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3227                         break;
3228                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3229                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3230                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3231                 udelay(1000);
3232         }
3233         if (i >= 5) {
3234                 netdev_err(tp->dev,
3235                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3236                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3237                 return -ENODEV;
3238         }
3239         tw32(cpu_base + CPU_STATE, 0xffffffff);
3240         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3241         return 0;
3242 }
3243
3244
3245 /* tp->lock is held. */
3246 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3247 {
3248         u32 addr_high, addr_low;
3249         int i;
3250
3251         addr_high = ((tp->dev->dev_addr[0] << 8) |
3252                      tp->dev->dev_addr[1]);
3253         addr_low = ((tp->dev->dev_addr[2] << 24) |
3254                     (tp->dev->dev_addr[3] << 16) |
3255                     (tp->dev->dev_addr[4] <<  8) |
3256                     (tp->dev->dev_addr[5] <<  0));
3257         for (i = 0; i < 4; i++) {
3258                 if (i == 1 && skip_mac_1)
3259                         continue;
3260                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3261                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3262         }
3263
3264         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3265             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3266                 for (i = 0; i < 12; i++) {
3267                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3268                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3269                 }
3270         }
3271
3272         addr_high = (tp->dev->dev_addr[0] +
3273                      tp->dev->dev_addr[1] +
3274                      tp->dev->dev_addr[2] +
3275                      tp->dev->dev_addr[3] +
3276                      tp->dev->dev_addr[4] +
3277                      tp->dev->dev_addr[5]) &
3278                 TX_BACKOFF_SEED_MASK;
3279         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3280 }
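
/* Packing example for __tg3_set_mac_addr() (arbitrary address, for
 * illustration): dev_addr 00:10:18:aa:bb:cc is written as
 * addr_high = 0x00000010 (bytes 0-1) and addr_low = 0x18aabbcc
 * (bytes 2-5), preserving the on-wire byte order.  The final write
 * seeds the TX backoff generator with the byte sum (0x259 here)
 * masked by TX_BACKOFF_SEED_MASK.
 */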
3281
3282 static void tg3_enable_register_access(struct tg3 *tp)
3283 {
3284         /*
3285          * Make sure register accesses (indirect or otherwise) will function
3286          * correctly.
3287          */
3288         pci_write_config_dword(tp->pdev,
3289                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3290 }
3291
3292 static int tg3_power_up(struct tg3 *tp)
3293 {
3294         int err;
3295
3296         tg3_enable_register_access(tp);
3297
3298         err = pci_set_power_state(tp->pdev, PCI_D0);
3299         if (!err) {
3300                 /* Switch out of Vaux if it is a NIC */
3301                 tg3_pwrsrc_switch_to_vmain(tp);
3302         } else {
3303                 netdev_err(tp->dev, "Transition to D0 failed\n");
3304         }
3305
3306         return err;
3307 }
3308
3309 static int tg3_power_down_prepare(struct tg3 *tp)
3310 {
3311         u32 misc_host_ctrl;
3312         bool device_should_wake, do_low_power;
3313
3314         tg3_enable_register_access(tp);
3315
3316         /* Restore the CLKREQ setting. */
3317         if (tg3_flag(tp, CLKREQ_BUG)) {
3318                 u16 lnkctl;
3319
3320                 pci_read_config_word(tp->pdev,
3321                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3322                                      &lnkctl);
3323                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3324                 pci_write_config_word(tp->pdev,
3325                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3326                                       lnkctl);
3327         }
3328
3329         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3330         tw32(TG3PCI_MISC_HOST_CTRL,
3331              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3332
3333         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3334                              tg3_flag(tp, WOL_ENABLE);
3335
3336         if (tg3_flag(tp, USE_PHYLIB)) {
3337                 do_low_power = false;
3338                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3339                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3340                         struct phy_device *phydev;
3341                         u32 phyid, advertising;
3342
3343                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3344
3345                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3346
3347                         tp->link_config.orig_speed = phydev->speed;
3348                         tp->link_config.orig_duplex = phydev->duplex;
3349                         tp->link_config.orig_autoneg = phydev->autoneg;
3350                         tp->link_config.orig_advertising = phydev->advertising;
3351
3352                         advertising = ADVERTISED_TP |
3353                                       ADVERTISED_Pause |
3354                                       ADVERTISED_Autoneg |
3355                                       ADVERTISED_10baseT_Half;
3356
3357                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3358                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3359                                         advertising |=
3360                                                 ADVERTISED_100baseT_Half |
3361                                                 ADVERTISED_100baseT_Full |
3362                                                 ADVERTISED_10baseT_Full;
3363                                 else
3364                                         advertising |= ADVERTISED_10baseT_Full;
3365                         }
3366
3367                         phydev->advertising = advertising;
3368
3369                         phy_start_aneg(phydev);
3370
3371                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3372                         if (phyid != PHY_ID_BCMAC131) {
3373                                 phyid &= PHY_BCM_OUI_MASK;
3374                                 if (phyid == PHY_BCM_OUI_1 ||
3375                                     phyid == PHY_BCM_OUI_2 ||
3376                                     phyid == PHY_BCM_OUI_3)
3377                                         do_low_power = true;
3378                         }
3379                 }
3380         } else {
3381                 do_low_power = true;
3382
3383                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3384                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3385                         tp->link_config.orig_speed = tp->link_config.speed;
3386                         tp->link_config.orig_duplex = tp->link_config.duplex;
3387                         tp->link_config.orig_autoneg = tp->link_config.autoneg;
3388                 }
3389
3390                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
3391                         tp->link_config.speed = SPEED_10;
3392                         tp->link_config.duplex = DUPLEX_HALF;
3393                         tp->link_config.autoneg = AUTONEG_ENABLE;
3394                         tg3_setup_phy(tp, 0);
3395                 }
3396         }
3397
3398         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3399                 u32 val;
3400
3401                 val = tr32(GRC_VCPU_EXT_CTRL);
3402                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3403         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3404                 int i;
3405                 u32 val;
3406
3407                 for (i = 0; i < 200; i++) {
3408                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3409                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3410                                 break;
3411                         msleep(1);
3412                 }
3413         }
3414         if (tg3_flag(tp, WOL_CAP))
3415                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3416                                                      WOL_DRV_STATE_SHUTDOWN |
3417                                                      WOL_DRV_WOL |
3418                                                      WOL_SET_MAGIC_PKT);
3419
3420         if (device_should_wake) {
3421                 u32 mac_mode;
3422
3423                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3424                         if (do_low_power &&
3425                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3426                                 tg3_phy_auxctl_write(tp,
3427                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3428                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3429                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3430                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3431                                 udelay(40);
3432                         }
3433
3434                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3435                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3436                         else
3437                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3438
3439                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3440                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3441                             ASIC_REV_5700) {
3442                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3443                                              SPEED_100 : SPEED_10;
3444                                 if (tg3_5700_link_polarity(tp, speed))
3445                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3446                                 else
3447                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3448                         }
3449                 } else {
3450                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3451                 }
3452
3453                 if (!tg3_flag(tp, 5750_PLUS))
3454                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3455
3456                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3457                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3458                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3459                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3460
3461                 if (tg3_flag(tp, ENABLE_APE))
3462                         mac_mode |= MAC_MODE_APE_TX_EN |
3463                                     MAC_MODE_APE_RX_EN |
3464                                     MAC_MODE_TDE_ENABLE;
3465
3466                 tw32_f(MAC_MODE, mac_mode);
3467                 udelay(100);
3468
3469                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3470                 udelay(10);
3471         }
3472
3473         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3474             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3475              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3476                 u32 base_val;
3477
3478                 base_val = tp->pci_clock_ctrl;
3479                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3480                              CLOCK_CTRL_TXCLK_DISABLE);
3481
3482                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3483                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3484         } else if (tg3_flag(tp, 5780_CLASS) ||
3485                    tg3_flag(tp, CPMU_PRESENT) ||
3486                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3487                 /* do nothing */
3488         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3489                 u32 newbits1, newbits2;
3490
3491                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3492                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3493                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3494                                     CLOCK_CTRL_TXCLK_DISABLE |
3495                                     CLOCK_CTRL_ALTCLK);
3496                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3497                 } else if (tg3_flag(tp, 5705_PLUS)) {
3498                         newbits1 = CLOCK_CTRL_625_CORE;
3499                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3500                 } else {
3501                         newbits1 = CLOCK_CTRL_ALTCLK;
3502                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3503                 }
3504
3505                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3506                             40);
3507
3508                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3509                             40);
3510
3511                 if (!tg3_flag(tp, 5705_PLUS)) {
3512                         u32 newbits3;
3513
3514                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3515                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3516                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3517                                             CLOCK_CTRL_TXCLK_DISABLE |
3518                                             CLOCK_CTRL_44MHZ_CORE);
3519                         } else {
3520                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3521                         }
3522
3523                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3524                                     tp->pci_clock_ctrl | newbits3, 40);
3525                 }
3526         }
3527
3528         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3529                 tg3_power_down_phy(tp, do_low_power);
3530
3531         tg3_frob_aux_power(tp, true);
3532
3533         /* Workaround for unstable PLL clock */
3534         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3535             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3536                 u32 val = tr32(0x7d00);
3537
3538                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3539                 tw32(0x7d00, val);
3540                 if (!tg3_flag(tp, ENABLE_ASF)) {
3541                         int err;
3542
3543                         err = tg3_nvram_lock(tp);
3544                         tg3_halt_cpu(tp, RX_CPU_BASE);
3545                         if (!err)
3546                                 tg3_nvram_unlock(tp);
3547                 }
3548         }
3549
3550         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3551
3552         return 0;
3553 }
3554
3555 static void tg3_power_down(struct tg3 *tp)
3556 {
3557         tg3_power_down_prepare(tp);
3558
3559         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3560         pci_set_power_state(tp->pdev, PCI_D3hot);
3561 }
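
/* Ordering note for tg3_power_down() above: pci_wake_from_d3() arms
 * (or disarms) PME according to the WOL_ENABLE flag before the D3hot
 * transition, so a wake event can still be signalled once the device
 * is in the low-power state.
 */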
3562
3563 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3564 {
3565         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3566         case MII_TG3_AUX_STAT_10HALF:
3567                 *speed = SPEED_10;
3568                 *duplex = DUPLEX_HALF;
3569                 break;
3570
3571         case MII_TG3_AUX_STAT_10FULL:
3572                 *speed = SPEED_10;
3573                 *duplex = DUPLEX_FULL;
3574                 break;
3575
3576         case MII_TG3_AUX_STAT_100HALF:
3577                 *speed = SPEED_100;
3578                 *duplex = DUPLEX_HALF;
3579                 break;
3580
3581         case MII_TG3_AUX_STAT_100FULL:
3582                 *speed = SPEED_100;
3583                 *duplex = DUPLEX_FULL;
3584                 break;
3585
3586         case MII_TG3_AUX_STAT_1000HALF:
3587                 *speed = SPEED_1000;
3588                 *duplex = DUPLEX_HALF;
3589                 break;
3590
3591         case MII_TG3_AUX_STAT_1000FULL:
3592                 *speed = SPEED_1000;
3593                 *duplex = DUPLEX_FULL;
3594                 break;
3595
3596         default:
3597                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3598                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3599                                  SPEED_10;
3600                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3601                                   DUPLEX_HALF;
3602                         break;
3603                 }
3604                 *speed = SPEED_INVALID;
3605                 *duplex = DUPLEX_INVALID;
3606                 break;
3607         }
3608 }
3609
3610 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3611 {
3612         int err = 0;
3613         u32 val, new_adv;
3614
3615         new_adv = ADVERTISE_CSMA;
3616         if (advertise & ADVERTISED_10baseT_Half)
3617                 new_adv |= ADVERTISE_10HALF;
3618         if (advertise & ADVERTISED_10baseT_Full)
3619                 new_adv |= ADVERTISE_10FULL;
3620         if (advertise & ADVERTISED_100baseT_Half)
3621                 new_adv |= ADVERTISE_100HALF;
3622         if (advertise & ADVERTISED_100baseT_Full)
3623                 new_adv |= ADVERTISE_100FULL;
3624
3625         new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
3626
3627         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3628         if (err)
3629                 goto done;
3630
3631         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3632                 goto done;
3633
3634         new_adv = 0;
3635         if (advertise & ADVERTISED_1000baseT_Half)
3636                 new_adv |= ADVERTISE_1000HALF;
3637         if (advertise & ADVERTISED_1000baseT_Full)
3638                 new_adv |= ADVERTISE_1000FULL;
3639
3640         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3641             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3642                 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3643
3644         err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3645         if (err)
3646                 goto done;
3647
3648         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3649                 goto done;
3650
3651         tw32(TG3_CPMU_EEE_MODE,
3652              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3653
3654         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
3655         if (!err) {
3656                 u32 err2;
3657
3658                 val = 0;
3659                 /* Advertise 100-BaseTX EEE ability */
3660                 if (advertise & ADVERTISED_100baseT_Full)
3661                         val |= MDIO_AN_EEE_ADV_100TX;
3662                 /* Advertise 1000-BaseT EEE ability */
3663                 if (advertise & ADVERTISED_1000baseT_Full)
3664                         val |= MDIO_AN_EEE_ADV_1000T;
3665                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3666                 if (err)
3667                         val = 0;
3668
3669                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3670                 case ASIC_REV_5717:
3671                 case ASIC_REV_57765:
3672                 case ASIC_REV_5719:
3673                         /* If we advertised any EEE abilities above... */
3674                         if (val)
3675                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3676                                       MII_TG3_DSP_TAP26_RMRXSTO |
3677                                       MII_TG3_DSP_TAP26_OPCSINPT;
3678                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3679                         /* Fall through */
3680                 case ASIC_REV_5720:
3681                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3682                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3683                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3684                 }
3685
3686                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
3687                 if (!err)
3688                         err = err2;
3689         }
3690
3691 done:
3692         return err;
3693 }
3694
3695 static void tg3_phy_copper_begin(struct tg3 *tp)
3696 {
3697         u32 new_adv;
3698         int i;
3699
3700         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3701                 new_adv = ADVERTISED_10baseT_Half |
3702                           ADVERTISED_10baseT_Full;
3703                 if (tg3_flag(tp, WOL_SPEED_100MB))
3704                         new_adv |= ADVERTISED_100baseT_Half |
3705                                    ADVERTISED_100baseT_Full;
3706
3707                 tg3_phy_autoneg_cfg(tp, new_adv,
3708                                     FLOW_CTRL_TX | FLOW_CTRL_RX);
3709         } else if (tp->link_config.speed == SPEED_INVALID) {
3710                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3711                         tp->link_config.advertising &=
3712                                 ~(ADVERTISED_1000baseT_Half |
3713                                   ADVERTISED_1000baseT_Full);
3714
3715                 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
3716                                     tp->link_config.flowctrl);
3717         } else {
3718                 /* Asking for a specific link mode. */
3719                 if (tp->link_config.speed == SPEED_1000) {
3720                         if (tp->link_config.duplex == DUPLEX_FULL)
3721                                 new_adv = ADVERTISED_1000baseT_Full;
3722                         else
3723                                 new_adv = ADVERTISED_1000baseT_Half;
3724                 } else if (tp->link_config.speed == SPEED_100) {
3725                         if (tp->link_config.duplex == DUPLEX_FULL)
3726                                 new_adv = ADVERTISED_100baseT_Full;
3727                         else
3728                                 new_adv = ADVERTISED_100baseT_Half;
3729                 } else {
3730                         if (tp->link_config.duplex == DUPLEX_FULL)
3731                                 new_adv = ADVERTISED_10baseT_Full;
3732                         else
3733                                 new_adv = ADVERTISED_10baseT_Half;
3734                 }
3735
3736                 tg3_phy_autoneg_cfg(tp, new_adv,
3737                                     tp->link_config.flowctrl);
3738         }
3739
3740         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3741             tp->link_config.speed != SPEED_INVALID) {
3742                 u32 bmcr, orig_bmcr;
3743
3744                 tp->link_config.active_speed = tp->link_config.speed;
3745                 tp->link_config.active_duplex = tp->link_config.duplex;
3746
3747                 bmcr = 0;
3748                 switch (tp->link_config.speed) {
3749                 default:
3750                 case SPEED_10:
3751                         break;
3752
3753                 case SPEED_100:
3754                         bmcr |= BMCR_SPEED100;
3755                         break;
3756
3757                 case SPEED_1000:
3758                         bmcr |= BMCR_SPEED1000;
3759                         break;
3760                 }
3761
3762                 if (tp->link_config.duplex == DUPLEX_FULL)
3763                         bmcr |= BMCR_FULLDPLX;
3764
3765                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3766                     (bmcr != orig_bmcr)) {
3767                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3768                         for (i = 0; i < 1500; i++) {
3769                                 u32 tmp;
3770
3771                                 udelay(10);
3772                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3773                                     tg3_readphy(tp, MII_BMSR, &tmp))
3774                                         continue;
3775                                 if (!(tmp & BMSR_LSTATUS)) {
3776                                         udelay(40);
3777                                         break;
3778                                 }
3779                         }
3780                         tg3_writephy(tp, MII_BMCR, bmcr);
3781                         udelay(40);
3782                 }
3783         } else {
3784                 tg3_writephy(tp, MII_BMCR,
3785                              BMCR_ANENABLE | BMCR_ANRESTART);
3786         }
3787 }
3788
3789 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3790 {
3791         int err;
3792
3793         /* Turn off tap power management. */
3794         /* Set Extended packet length bit */
3795         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3796
3797         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3798         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3799         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3800         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3801         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3802
3803         udelay(40);
3804
3805         return err;
3806 }
3807
3808 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3809 {
3810         u32 adv_reg, all_mask = 0;
3811
3812         if (mask & ADVERTISED_10baseT_Half)
3813                 all_mask |= ADVERTISE_10HALF;
3814         if (mask & ADVERTISED_10baseT_Full)
3815                 all_mask |= ADVERTISE_10FULL;
3816         if (mask & ADVERTISED_100baseT_Half)
3817                 all_mask |= ADVERTISE_100HALF;
3818         if (mask & ADVERTISED_100baseT_Full)
3819                 all_mask |= ADVERTISE_100FULL;
3820
3821         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3822                 return 0;
3823
3824         if ((adv_reg & ADVERTISE_ALL) != all_mask)
3825                 return 0;
3826
3827         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3828                 u32 tg3_ctrl;
3829
3830                 all_mask = 0;
3831                 if (mask & ADVERTISED_1000baseT_Half)
3832                         all_mask |= ADVERTISE_1000HALF;
3833                 if (mask & ADVERTISED_1000baseT_Full)
3834                         all_mask |= ADVERTISE_1000FULL;
3835
3836                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
3837                         return 0;
3838
3839                 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
3840                 if (tg3_ctrl != all_mask)
3841                         return 0;
3842         }
3843
3844         return 1;
3845 }
3846
3847 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3848 {
3849         u32 curadv, reqadv;
3850
3851         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3852                 return 1;
3853
3854         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3855         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3856
3857         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3858                 if (curadv != reqadv)
3859                         return 0;
3860
3861                 if (tg3_flag(tp, PAUSE_AUTONEG))
3862                         tg3_readphy(tp, MII_LPA, rmtadv);
3863         } else {
3864                 /* Reprogram the advertisement register, even if it
3865                  * does not affect the current link.  If the link
3866                  * gets renegotiated in the future, we can save an
3867                  * additional renegotiation cycle by advertising
3868                  * it correctly in the first place.
3869                  */
3870                 if (curadv != reqadv) {
3871                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3872                                      ADVERTISE_PAUSE_ASYM);
3873                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3874                 }
3875         }
3876
3877         return 1;
3878 }
3879
3880 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3881 {
3882         int current_link_up;
3883         u32 bmsr, val;
3884         u32 lcl_adv, rmt_adv;
3885         u16 current_speed;
3886         u8 current_duplex;
3887         int i, err;
3888
3889         tw32(MAC_EVENT, 0);
3890
3891         tw32_f(MAC_STATUS,
3892              (MAC_STATUS_SYNC_CHANGED |
3893               MAC_STATUS_CFG_CHANGED |
3894               MAC_STATUS_MI_COMPLETION |
3895               MAC_STATUS_LNKSTATE_CHANGED));
3896         udelay(40);
3897
3898         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3899                 tw32_f(MAC_MI_MODE,
3900                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3901                 udelay(80);
3902         }
3903
3904         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
3905
3906         /* Some third-party PHYs need to be reset when the link
3907          * goes down.
3908          */
3909         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3910              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3911              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3912             netif_carrier_ok(tp->dev)) {
3913                 tg3_readphy(tp, MII_BMSR, &bmsr);
3914                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3915                     !(bmsr & BMSR_LSTATUS))
3916                         force_reset = 1;
3917         }
3918         if (force_reset)
3919                 tg3_phy_reset(tp);
3920
3921         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3922                 tg3_readphy(tp, MII_BMSR, &bmsr);
3923                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3924                     !tg3_flag(tp, INIT_COMPLETE))
3925                         bmsr = 0;
3926
3927                 if (!(bmsr & BMSR_LSTATUS)) {
3928                         err = tg3_init_5401phy_dsp(tp);
3929                         if (err)
3930                                 return err;
3931
3932                         tg3_readphy(tp, MII_BMSR, &bmsr);
3933                         for (i = 0; i < 1000; i++) {
3934                                 udelay(10);
3935                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3936                                     (bmsr & BMSR_LSTATUS)) {
3937                                         udelay(40);
3938                                         break;
3939                                 }
3940                         }
3941
3942                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3943                             TG3_PHY_REV_BCM5401_B0 &&
3944                             !(bmsr & BMSR_LSTATUS) &&
3945                             tp->link_config.active_speed == SPEED_1000) {
3946                                 err = tg3_phy_reset(tp);
3947                                 if (!err)
3948                                         err = tg3_init_5401phy_dsp(tp);
3949                                 if (err)
3950                                         return err;
3951                         }
3952                 }
3953         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3954                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3955                 /* 5701 {A0,B0} CRC bug workaround */
3956                 tg3_writephy(tp, 0x15, 0x0a75);
3957                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3958                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3959                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3960         }
3961
3962         /* Clear pending interrupts... */
3963         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3964         tg3_readphy(tp, MII_TG3_ISTAT, &val);
3965
3966         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3967                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3968         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3969                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3970
3971         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3972             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3973                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3974                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3975                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3976                 else
3977                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3978         }
3979
3980         current_link_up = 0;
3981         current_speed = SPEED_INVALID;
3982         current_duplex = DUPLEX_INVALID;
3983
3984         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3985                 err = tg3_phy_auxctl_read(tp,
3986                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3987                                           &val);
3988                 if (!err && !(val & (1 << 10))) {
3989                         tg3_phy_auxctl_write(tp,
3990                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
3991                                              val | (1 << 10));
3992                         goto relink;
3993                 }
3994         }
3995
3996         bmsr = 0;
3997         for (i = 0; i < 100; i++) {
3998                 tg3_readphy(tp, MII_BMSR, &bmsr);
3999                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4000                     (bmsr & BMSR_LSTATUS))
4001                         break;
4002                 udelay(40);
4003         }
4004
4005         if (bmsr & BMSR_LSTATUS) {
4006                 u32 aux_stat, bmcr;
4007
4008                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4009                 for (i = 0; i < 2000; i++) {
4010                         udelay(10);
4011                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4012                             aux_stat)
4013                                 break;
4014                 }
4015
4016                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4017                                              &current_speed,
4018                                              &current_duplex);
4019
4020                 bmcr = 0;
4021                 for (i = 0; i < 200; i++) {
4022                         tg3_readphy(tp, MII_BMCR, &bmcr);
4023                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4024                                 continue;
4025                         if (bmcr && bmcr != 0x7fff)
4026                                 break;
4027                         udelay(10);
4028                 }
4029
4030                 lcl_adv = 0;
4031                 rmt_adv = 0;
4032
4033                 tp->link_config.active_speed = current_speed;
4034                 tp->link_config.active_duplex = current_duplex;
4035
4036                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4037                         if ((bmcr & BMCR_ANENABLE) &&
4038                             tg3_copper_is_advertising_all(tp,
4039                                                 tp->link_config.advertising)) {
4040                                 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
4041                                                                   &rmt_adv))
4042                                         current_link_up = 1;
4043                         }
4044                 } else {
4045                         if (!(bmcr & BMCR_ANENABLE) &&
4046                             tp->link_config.speed == current_speed &&
4047                             tp->link_config.duplex == current_duplex &&
4048                             tp->link_config.flowctrl ==
4049                             tp->link_config.active_flowctrl) {
4050                                 current_link_up = 1;
4051                         }
4052                 }
4053
4054                 if (current_link_up == 1 &&
4055                     tp->link_config.active_duplex == DUPLEX_FULL)
4056                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4057         }
4058
4059 relink:
4060         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4061                 tg3_phy_copper_begin(tp);
4062
4063                 tg3_readphy(tp, MII_BMSR, &bmsr);
4064                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4065                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4066                         current_link_up = 1;
4067         }
4068
4069         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4070         if (current_link_up == 1) {
4071                 if (tp->link_config.active_speed == SPEED_100 ||
4072                     tp->link_config.active_speed == SPEED_10)
4073                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4074                 else
4075                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4076         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4077                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4078         else
4079                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4080
4081         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4082         if (tp->link_config.active_duplex == DUPLEX_HALF)
4083                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4084
4085         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4086                 if (current_link_up == 1 &&
4087                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4088                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4089                 else
4090                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4091         }
4092
4093         /* Without this setting, the Netgear GA302T PHY does not
4094          * send/receive packets; the exact reason is unknown.
4095          */
4096         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4097             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4098                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4099                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4100                 udelay(80);
4101         }
4102
4103         tw32_f(MAC_MODE, tp->mac_mode);
4104         udelay(40);
4105
4106         tg3_phy_eee_adjust(tp, current_link_up);
4107
4108         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4109                 /* Polled via timer. */
4110                 tw32_f(MAC_EVENT, 0);
4111         } else {
4112                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4113         }
4114         udelay(40);
4115
4116         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4117             current_link_up == 1 &&
4118             tp->link_config.active_speed == SPEED_1000 &&
4119             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4120                 udelay(120);
4121                 tw32_f(MAC_STATUS,
4122                      (MAC_STATUS_SYNC_CHANGED |
4123                       MAC_STATUS_CFG_CHANGED));
4124                 udelay(40);
4125                 tg3_write_mem(tp,
4126                               NIC_SRAM_FIRMWARE_MBOX,
4127                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4128         }
4129
4130         /* Prevent send BD corruption. */
4131         if (tg3_flag(tp, CLKREQ_BUG)) {
4132                 u16 oldlnkctl, newlnkctl;
4133
4134                 pci_read_config_word(tp->pdev,
4135                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4136                                      &oldlnkctl);
4137                 if (tp->link_config.active_speed == SPEED_100 ||
4138                     tp->link_config.active_speed == SPEED_10)
4139                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4140                 else
4141                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4142                 if (newlnkctl != oldlnkctl)
4143                         pci_write_config_word(tp->pdev,
4144                                               pci_pcie_cap(tp->pdev) +
4145                                               PCI_EXP_LNKCTL, newlnkctl);
4146         }
4147
4148         if (current_link_up != netif_carrier_ok(tp->dev)) {
4149                 if (current_link_up)
4150                         netif_carrier_on(tp->dev);
4151                 else
4152                         netif_carrier_off(tp->dev);
4153                 tg3_link_report(tp);
4154         }
4155
4156         return 0;
4157 }
4158
4159 struct tg3_fiber_aneginfo {
4160         int state;
4161 #define ANEG_STATE_UNKNOWN              0
4162 #define ANEG_STATE_AN_ENABLE            1
4163 #define ANEG_STATE_RESTART_INIT         2
4164 #define ANEG_STATE_RESTART              3
4165 #define ANEG_STATE_DISABLE_LINK_OK      4
4166 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4167 #define ANEG_STATE_ABILITY_DETECT       6
4168 #define ANEG_STATE_ACK_DETECT_INIT      7
4169 #define ANEG_STATE_ACK_DETECT           8
4170 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4171 #define ANEG_STATE_COMPLETE_ACK         10
4172 #define ANEG_STATE_IDLE_DETECT_INIT     11
4173 #define ANEG_STATE_IDLE_DETECT          12
4174 #define ANEG_STATE_LINK_OK              13
4175 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4176 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4177
4178         u32 flags;
4179 #define MR_AN_ENABLE            0x00000001
4180 #define MR_RESTART_AN           0x00000002
4181 #define MR_AN_COMPLETE          0x00000004
4182 #define MR_PAGE_RX              0x00000008
4183 #define MR_NP_LOADED            0x00000010
4184 #define MR_TOGGLE_TX            0x00000020
4185 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4186 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4187 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4188 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4189 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4190 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4191 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4192 #define MR_TOGGLE_RX            0x00002000
4193 #define MR_NP_RX                0x00004000
4194
4195 #define MR_LINK_OK              0x80000000
4196
4197         unsigned long link_time, cur_time;
4198
4199         u32 ability_match_cfg;
4200         int ability_match_count;
4201
4202         char ability_match, idle_match, ack_match;
4203
4204         u32 txconfig, rxconfig;
4205 #define ANEG_CFG_NP             0x00000080
4206 #define ANEG_CFG_ACK            0x00000040
4207 #define ANEG_CFG_RF2            0x00000020
4208 #define ANEG_CFG_RF1            0x00000010
4209 #define ANEG_CFG_PS2            0x00000001
4210 #define ANEG_CFG_PS1            0x00008000
4211 #define ANEG_CFG_HD             0x00004000
4212 #define ANEG_CFG_FD             0x00002000
4213 #define ANEG_CFG_INVAL          0x00001f06
4214
4215 };
4216 #define ANEG_OK         0
4217 #define ANEG_DONE       1
4218 #define ANEG_TIMER_ENAB 2
4219 #define ANEG_FAILED     -1
4220
4221 #define ANEG_STATE_SETTLE_TIME  10000
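
/* ap->cur_time is incremented once per invocation of the state
 * machine below, so ANEG_STATE_SETTLE_TIME is measured in calls to
 * tg3_fiber_aneg_smachine(), not in wall-clock units.
 */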
4222
4223 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4224                                    struct tg3_fiber_aneginfo *ap)
4225 {
4226         u16 flowctrl;
4227         unsigned long delta;
4228         u32 rx_cfg_reg;
4229         int ret;
4230
4231         if (ap->state == ANEG_STATE_UNKNOWN) {
4232                 ap->rxconfig = 0;
4233                 ap->link_time = 0;
4234                 ap->cur_time = 0;
4235                 ap->ability_match_cfg = 0;
4236                 ap->ability_match_count = 0;
4237                 ap->ability_match = 0;
4238                 ap->idle_match = 0;
4239                 ap->ack_match = 0;
4240         }
4241         ap->cur_time++;
4242
4243         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4244                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4245
4246                 if (rx_cfg_reg != ap->ability_match_cfg) {
4247                         ap->ability_match_cfg = rx_cfg_reg;
4248                         ap->ability_match = 0;
4249                         ap->ability_match_count = 0;
4250                 } else {
4251                         if (++ap->ability_match_count > 1) {
4252                                 ap->ability_match = 1;
4253                                 ap->ability_match_cfg = rx_cfg_reg;
4254                         }
4255                 }
4256                 if (rx_cfg_reg & ANEG_CFG_ACK)
4257                         ap->ack_match = 1;
4258                 else
4259                         ap->ack_match = 0;
4260
4261                 ap->idle_match = 0;
4262         } else {
4263                 ap->idle_match = 1;
4264                 ap->ability_match_cfg = 0;
4265                 ap->ability_match_count = 0;
4266                 ap->ability_match = 0;
4267                 ap->ack_match = 0;
4268
4269                 rx_cfg_reg = 0;
4270         }
4271
4272         ap->rxconfig = rx_cfg_reg;
4273         ret = ANEG_OK;
4274
4275         switch (ap->state) {
4276         case ANEG_STATE_UNKNOWN:
4277                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4278                         ap->state = ANEG_STATE_AN_ENABLE;
4279
4280                 /* fallthru */
4281         case ANEG_STATE_AN_ENABLE:
4282                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4283                 if (ap->flags & MR_AN_ENABLE) {
4284                         ap->link_time = 0;
4285                         ap->cur_time = 0;
4286                         ap->ability_match_cfg = 0;
4287                         ap->ability_match_count = 0;
4288                         ap->ability_match = 0;
4289                         ap->idle_match = 0;
4290                         ap->ack_match = 0;
4291
4292                         ap->state = ANEG_STATE_RESTART_INIT;
4293                 } else {
4294                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4295                 }
4296                 break;
4297
4298         case ANEG_STATE_RESTART_INIT:
4299                 ap->link_time = ap->cur_time;
4300                 ap->flags &= ~(MR_NP_LOADED);
4301                 ap->txconfig = 0;
4302                 tw32(MAC_TX_AUTO_NEG, 0);
4303                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4304                 tw32_f(MAC_MODE, tp->mac_mode);
4305                 udelay(40);
4306
4307                 ret = ANEG_TIMER_ENAB;
4308                 ap->state = ANEG_STATE_RESTART;
4309
4310                 /* fallthru */
4311         case ANEG_STATE_RESTART:
4312                 delta = ap->cur_time - ap->link_time;
4313                 if (delta > ANEG_STATE_SETTLE_TIME)
4314                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4315                 else
4316                         ret = ANEG_TIMER_ENAB;
4317                 break;
4318
4319         case ANEG_STATE_DISABLE_LINK_OK:
4320                 ret = ANEG_DONE;
4321                 break;
4322
4323         case ANEG_STATE_ABILITY_DETECT_INIT:
4324                 ap->flags &= ~(MR_TOGGLE_TX);
4325                 ap->txconfig = ANEG_CFG_FD;
4326                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4327                 if (flowctrl & ADVERTISE_1000XPAUSE)
4328                         ap->txconfig |= ANEG_CFG_PS1;
4329                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4330                         ap->txconfig |= ANEG_CFG_PS2;
4331                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4332                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4333                 tw32_f(MAC_MODE, tp->mac_mode);
4334                 udelay(40);
4335
4336                 ap->state = ANEG_STATE_ABILITY_DETECT;
4337                 break;
4338
4339         case ANEG_STATE_ABILITY_DETECT:
4340                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4341                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4342                 break;
4343
4344         case ANEG_STATE_ACK_DETECT_INIT:
4345                 ap->txconfig |= ANEG_CFG_ACK;
4346                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4347                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4348                 tw32_f(MAC_MODE, tp->mac_mode);
4349                 udelay(40);
4350
4351                 ap->state = ANEG_STATE_ACK_DETECT;
4352
4353                 /* fallthru */
4354         case ANEG_STATE_ACK_DETECT:
4355                 if (ap->ack_match != 0) {
4356                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4357                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4358                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4359                         } else {
4360                                 ap->state = ANEG_STATE_AN_ENABLE;
4361                         }
4362                 } else if (ap->ability_match != 0 &&
4363                            ap->rxconfig == 0) {
4364                         ap->state = ANEG_STATE_AN_ENABLE;
4365                 }
4366                 break;
4367
4368         case ANEG_STATE_COMPLETE_ACK_INIT:
4369                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4370                         ret = ANEG_FAILED;
4371                         break;
4372                 }
4373                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4374                                MR_LP_ADV_HALF_DUPLEX |
4375                                MR_LP_ADV_SYM_PAUSE |
4376                                MR_LP_ADV_ASYM_PAUSE |
4377                                MR_LP_ADV_REMOTE_FAULT1 |
4378                                MR_LP_ADV_REMOTE_FAULT2 |
4379                                MR_LP_ADV_NEXT_PAGE |
4380                                MR_TOGGLE_RX |
4381                                MR_NP_RX);
4382                 if (ap->rxconfig & ANEG_CFG_FD)
4383                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4384                 if (ap->rxconfig & ANEG_CFG_HD)
4385                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4386                 if (ap->rxconfig & ANEG_CFG_PS1)
4387                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4388                 if (ap->rxconfig & ANEG_CFG_PS2)
4389                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4390                 if (ap->rxconfig & ANEG_CFG_RF1)
4391                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4392                 if (ap->rxconfig & ANEG_CFG_RF2)
4393                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4394                 if (ap->rxconfig & ANEG_CFG_NP)
4395                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4396
4397                 ap->link_time = ap->cur_time;
4398
4399                 ap->flags ^= (MR_TOGGLE_TX);
4400                 if (ap->rxconfig & 0x0008)      /* presumably the toggle bit */
4401                         ap->flags |= MR_TOGGLE_RX;
4402                 if (ap->rxconfig & ANEG_CFG_NP)
4403                         ap->flags |= MR_NP_RX;
4404                 ap->flags |= MR_PAGE_RX;
4405
4406                 ap->state = ANEG_STATE_COMPLETE_ACK;
4407                 ret = ANEG_TIMER_ENAB;
4408                 break;
4409
4410         case ANEG_STATE_COMPLETE_ACK:
4411                 if (ap->ability_match != 0 &&
4412                     ap->rxconfig == 0) {
4413                         ap->state = ANEG_STATE_AN_ENABLE;
4414                         break;
4415                 }
4416                 delta = ap->cur_time - ap->link_time;
4417                 if (delta > ANEG_STATE_SETTLE_TIME) {
4418                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4419                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4420                         } else {
4421                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4422                                     !(ap->flags & MR_NP_RX)) {
4423                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4424                                 } else {
4425                                         ret = ANEG_FAILED;
4426                                 }
4427                         }
4428                 }
4429                 break;
4430
4431         case ANEG_STATE_IDLE_DETECT_INIT:
4432                 ap->link_time = ap->cur_time;
4433                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4434                 tw32_f(MAC_MODE, tp->mac_mode);
4435                 udelay(40);
4436
4437                 ap->state = ANEG_STATE_IDLE_DETECT;
4438                 ret = ANEG_TIMER_ENAB;
4439                 break;
4440
4441         case ANEG_STATE_IDLE_DETECT:
4442                 if (ap->ability_match != 0 &&
4443                     ap->rxconfig == 0) {
4444                         ap->state = ANEG_STATE_AN_ENABLE;
4445                         break;
4446                 }
4447                 delta = ap->cur_time - ap->link_time;
4448                 if (delta > ANEG_STATE_SETTLE_TIME) {
4449                         /* XXX another gem from the Broadcom driver :( */
4450                         ap->state = ANEG_STATE_LINK_OK;
4451                 }
4452                 break;
4453
4454         case ANEG_STATE_LINK_OK:
4455                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4456                 ret = ANEG_DONE;
4457                 break;
4458
4459         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4460                 /* ??? unimplemented */
4461                 break;
4462
4463         case ANEG_STATE_NEXT_PAGE_WAIT:
4464                 /* ??? unimplemented */
4465                 break;
4466
4467         default:
4468                 ret = ANEG_FAILED;
4469                 break;
4470         }
4471
4472         return ret;
4473 }
4474
4475 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4476 {
4477         int res = 0;
4478         struct tg3_fiber_aneginfo aninfo;
4479         int status = ANEG_FAILED;
4480         unsigned int tick;
4481         u32 tmp;
4482
4483         tw32_f(MAC_TX_AUTO_NEG, 0);
4484
4485         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4486         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4487         udelay(40);
4488
4489         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4490         udelay(40);
4491
4492         memset(&aninfo, 0, sizeof(aninfo));
4493         aninfo.flags |= MR_AN_ENABLE;
4494         aninfo.state = ANEG_STATE_UNKNOWN;
4495         aninfo.cur_time = 0;
4496         tick = 0;
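        /* Poll the state machine for at most ~195 ms: up to 195000
         * passes with a udelay(1) between them.
         */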
4497         while (++tick < 195000) {
4498                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4499                 if (status == ANEG_DONE || status == ANEG_FAILED)
4500                         break;
4501
4502                 udelay(1);
4503         }
4504
4505         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4506         tw32_f(MAC_MODE, tp->mac_mode);
4507         udelay(40);
4508
4509         *txflags = aninfo.txconfig;
4510         *rxflags = aninfo.flags;
4511
4512         if (status == ANEG_DONE &&
4513             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4514                              MR_LP_ADV_FULL_DUPLEX)))
4515                 res = 1;
4516
4517         return res;
4518 }
4519
4520 static void tg3_init_bcm8002(struct tg3 *tp)
4521 {
4522         u32 mac_status = tr32(MAC_STATUS);
4523         int i;
4524
4525         /* Reset when initializing for the first time, or when we have a link. */
4526         if (tg3_flag(tp, INIT_COMPLETE) &&
4527             !(mac_status & MAC_STATUS_PCS_SYNCED))
4528                 return;
4529
4530         /* Set PLL lock range. */
4531         tg3_writephy(tp, 0x16, 0x8007);
4532
4533         /* SW reset */
4534         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4535
4536         /* Wait for reset to complete. */
4537         /* XXX schedule_timeout() ... */
4538         for (i = 0; i < 500; i++)
4539                 udelay(10);
4540
4541         /* Config mode; select PMA/Ch 1 regs. */
4542         tg3_writephy(tp, 0x10, 0x8411);
4543
4544         /* Enable auto-lock and comdet, select txclk for tx. */
4545         tg3_writephy(tp, 0x11, 0x0a10);
4546
4547         tg3_writephy(tp, 0x18, 0x00a0);
4548         tg3_writephy(tp, 0x16, 0x41ff);
4549
4550         /* Assert and deassert POR. */
4551         tg3_writephy(tp, 0x13, 0x0400);
4552         udelay(40);
4553         tg3_writephy(tp, 0x13, 0x0000);
4554
4555         tg3_writephy(tp, 0x11, 0x0a50);
4556         udelay(40);
4557         tg3_writephy(tp, 0x11, 0x0a10);
4558
4559         /* Wait for signal to stabilize */
4560         /* XXX schedule_timeout() ... */
4561         for (i = 0; i < 15000; i++)
4562                 udelay(10);
4563
4564         /* Deselect the channel register so we can read the PHYID
4565          * later.
4566          */
4567         tg3_writephy(tp, 0x10, 0x8011);
4568 }
4569
4570 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4571 {
4572         u16 flowctrl;
4573         u32 sg_dig_ctrl, sg_dig_status;
4574         u32 serdes_cfg, expected_sg_dig_ctrl;
4575         int workaround, port_a;
4576         int current_link_up;
4577
4578         serdes_cfg = 0;
4579         expected_sg_dig_ctrl = 0;
4580         workaround = 0;
4581         port_a = 1;
4582         current_link_up = 0;
4583
4584         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4585             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4586                 workaround = 1;
4587                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4588                         port_a = 0;
4589
4590                 /* Preserve bits 0-11, 13, 14 for signal pre-emphasis
4591                  * and bits 20-23 for the voltage regulator. */
4592                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4593         }
4594
4595         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4596
4597         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4598                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4599                         if (workaround) {
4600                                 u32 val = serdes_cfg;
4601
4602                                 if (port_a)
4603                                         val |= 0xc010000;
4604                                 else
4605                                         val |= 0x4010000;
4606                                 tw32_f(MAC_SERDES_CFG, val);
4607                         }
4608
4609                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4610                 }
4611                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4612                         tg3_setup_flow_control(tp, 0, 0);
4613                         current_link_up = 1;
4614                 }
4615                 goto out;
4616         }
4617
4618         /* Want auto-negotiation.  */
4619         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4620
4621         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4622         if (flowctrl & ADVERTISE_1000XPAUSE)
4623                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4624         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4625                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4626
4627         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4628                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4629                     tp->serdes_counter &&
4630                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4631                                     MAC_STATUS_RCVD_CFG)) ==
4632                      MAC_STATUS_PCS_SYNCED)) {
4633                         tp->serdes_counter--;
4634                         current_link_up = 1;
4635                         goto out;
4636                 }
4637 restart_autoneg:
4638                 if (workaround)
4639                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4640                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4641                 udelay(5);
4642                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4643
4644                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4645                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4646         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4647                                  MAC_STATUS_SIGNAL_DET)) {
4648                 sg_dig_status = tr32(SG_DIG_STATUS);
4649                 mac_status = tr32(MAC_STATUS);
4650
4651                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4652                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4653                         u32 local_adv = 0, remote_adv = 0;
4654
4655                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4656                                 local_adv |= ADVERTISE_1000XPAUSE;
4657                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4658                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4659
4660                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4661                                 remote_adv |= LPA_1000XPAUSE;
4662                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4663                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4664
4665                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4666                         current_link_up = 1;
4667                         tp->serdes_counter = 0;
4668                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4669                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4670                         if (tp->serdes_counter)
4671                                 tp->serdes_counter--;
4672                         else {
4673                                 if (workaround) {
4674                                         u32 val = serdes_cfg;
4675
4676                                         if (port_a)
4677                                                 val |= 0xc010000;
4678                                         else
4679                                                 val |= 0x4010000;
4680
4681                                         tw32_f(MAC_SERDES_CFG, val);
4682                                 }
4683
4684                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4685                                 udelay(40);
4686
4687                                 /* Link parallel detection: link is up
4688                                  * only if we have PCS_SYNC and are not
4689                                  * receiving config code words. */
4690                                 mac_status = tr32(MAC_STATUS);
4691                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4692                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4693                                         tg3_setup_flow_control(tp, 0, 0);
4694                                         current_link_up = 1;
4695                                         tp->phy_flags |=
4696                                                 TG3_PHYFLG_PARALLEL_DETECT;
4697                                         tp->serdes_counter =
4698                                                 SERDES_PARALLEL_DET_TIMEOUT;
4699                                 } else
4700                                         goto restart_autoneg;
4701                         }
4702                 }
4703         } else {
4704                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4705                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4706         }
4707
4708 out:
4709         return current_link_up;
4710 }
4711
4712 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4713 {
4714         int current_link_up = 0;
4715
4716         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4717                 goto out;
4718
4719         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4720                 u32 txflags, rxflags;
4721                 int i;
4722
4723                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4724                         u32 local_adv = 0, remote_adv = 0;
4725
4726                         if (txflags & ANEG_CFG_PS1)
4727                                 local_adv |= ADVERTISE_1000XPAUSE;
4728                         if (txflags & ANEG_CFG_PS2)
4729                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4730
4731                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4732                                 remote_adv |= LPA_1000XPAUSE;
4733                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4734                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4735
4736                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4737
4738                         current_link_up = 1;
4739                 }
4740                 for (i = 0; i < 30; i++) {
4741                         udelay(20);
4742                         tw32_f(MAC_STATUS,
4743                                (MAC_STATUS_SYNC_CHANGED |
4744                                 MAC_STATUS_CFG_CHANGED));
4745                         udelay(40);
4746                         if ((tr32(MAC_STATUS) &
4747                              (MAC_STATUS_SYNC_CHANGED |
4748                               MAC_STATUS_CFG_CHANGED)) == 0)
4749                                 break;
4750                 }
4751
4752                 mac_status = tr32(MAC_STATUS);
4753                 if (current_link_up == 0 &&
4754                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4755                     !(mac_status & MAC_STATUS_RCVD_CFG))
4756                         current_link_up = 1;
4757         } else {
4758                 tg3_setup_flow_control(tp, 0, 0);
4759
4760                 /* Forcing 1000FD link up. */
4761                 current_link_up = 1;
4762
4763                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4764                 udelay(40);
4765
4766                 tw32_f(MAC_MODE, tp->mac_mode);
4767                 udelay(40);
4768         }
4769
4770 out:
4771         return current_link_up;
4772 }
4773
4774 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4775 {
4776         u32 orig_pause_cfg;
4777         u16 orig_active_speed;
4778         u8 orig_active_duplex;
4779         u32 mac_status;
4780         int current_link_up;
4781         int i;
4782
4783         orig_pause_cfg = tp->link_config.active_flowctrl;
4784         orig_active_speed = tp->link_config.active_speed;
4785         orig_active_duplex = tp->link_config.active_duplex;
4786
4787         if (!tg3_flag(tp, HW_AUTONEG) &&
4788             netif_carrier_ok(tp->dev) &&
4789             tg3_flag(tp, INIT_COMPLETE)) {
4790                 mac_status = tr32(MAC_STATUS);
4791                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4792                                MAC_STATUS_SIGNAL_DET |
4793                                MAC_STATUS_CFG_CHANGED |
4794                                MAC_STATUS_RCVD_CFG);
4795                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4796                                    MAC_STATUS_SIGNAL_DET)) {
4797                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4798                                             MAC_STATUS_CFG_CHANGED));
4799                         return 0;
4800                 }
4801         }
4802
4803         tw32_f(MAC_TX_AUTO_NEG, 0);
4804
4805         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4806         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4807         tw32_f(MAC_MODE, tp->mac_mode);
4808         udelay(40);
4809
4810         if (tp->phy_id == TG3_PHY_ID_BCM8002)
4811                 tg3_init_bcm8002(tp);
4812
4813         /* Enable link change events even when polling the serdes.  */
4814         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4815         udelay(40);
4816
4817         current_link_up = 0;
4818         mac_status = tr32(MAC_STATUS);
4819
4820         if (tg3_flag(tp, HW_AUTONEG))
4821                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4822         else
4823                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4824
4825         tp->napi[0].hw_status->status =
4826                 (SD_STATUS_UPDATED |
4827                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4828
4829         for (i = 0; i < 100; i++) {
4830                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4831                                     MAC_STATUS_CFG_CHANGED));
4832                 udelay(5);
4833                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4834                                          MAC_STATUS_CFG_CHANGED |
4835                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4836                         break;
4837         }
4838
4839         mac_status = tr32(MAC_STATUS);
4840         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4841                 current_link_up = 0;
4842                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4843                     tp->serdes_counter == 0) {
4844                         tw32_f(MAC_MODE, (tp->mac_mode |
4845                                           MAC_MODE_SEND_CONFIGS));
4846                         udelay(1);
4847                         tw32_f(MAC_MODE, tp->mac_mode);
4848                 }
4849         }
4850
4851         if (current_link_up == 1) {
4852                 tp->link_config.active_speed = SPEED_1000;
4853                 tp->link_config.active_duplex = DUPLEX_FULL;
4854                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4855                                     LED_CTRL_LNKLED_OVERRIDE |
4856                                     LED_CTRL_1000MBPS_ON));
4857         } else {
4858                 tp->link_config.active_speed = SPEED_INVALID;
4859                 tp->link_config.active_duplex = DUPLEX_INVALID;
4860                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4861                                     LED_CTRL_LNKLED_OVERRIDE |
4862                                     LED_CTRL_TRAFFIC_OVERRIDE));
4863         }
4864
4865         if (current_link_up != netif_carrier_ok(tp->dev)) {
4866                 if (current_link_up)
4867                         netif_carrier_on(tp->dev);
4868                 else
4869                         netif_carrier_off(tp->dev);
4870                 tg3_link_report(tp);
4871         } else {
4872                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4873                 if (orig_pause_cfg != now_pause_cfg ||
4874                     orig_active_speed != tp->link_config.active_speed ||
4875                     orig_active_duplex != tp->link_config.active_duplex)
4876                         tg3_link_report(tp);
4877         }
4878
4879         return 0;
4880 }
4881
4882 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4883 {
4884         int current_link_up, err = 0;
4885         u32 bmsr, bmcr;
4886         u16 current_speed;
4887         u8 current_duplex;
4888         u32 local_adv, remote_adv;
4889
4890         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4891         tw32_f(MAC_MODE, tp->mac_mode);
4892         udelay(40);
4893
4894         tw32(MAC_EVENT, 0);
4895
4896         tw32_f(MAC_STATUS,
4897              (MAC_STATUS_SYNC_CHANGED |
4898               MAC_STATUS_CFG_CHANGED |
4899               MAC_STATUS_MI_COMPLETION |
4900               MAC_STATUS_LNKSTATE_CHANGED));
4901         udelay(40);
4902
4903         if (force_reset)
4904                 tg3_phy_reset(tp);
4905
4906         current_link_up = 0;
4907         current_speed = SPEED_INVALID;
4908         current_duplex = DUPLEX_INVALID;
4909
4910         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4911         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4912         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4913                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4914                         bmsr |= BMSR_LSTATUS;
4915                 else
4916                         bmsr &= ~BMSR_LSTATUS;
4917         }
4918
4919         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4920
4921         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4922             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4923                 /* do nothing, just check for link up at the end */
4924         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4925                 u32 adv, new_adv;
4926
4927                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4928                 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4929                                   ADVERTISE_1000XPAUSE |
4930                                   ADVERTISE_1000XPSE_ASYM |
4931                                   ADVERTISE_SLCT);
4932
4933                 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4934
4935                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4936                         new_adv |= ADVERTISE_1000XHALF;
4937                 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4938                         new_adv |= ADVERTISE_1000XFULL;
4939
4940                 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4941                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
4942                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4943                         tg3_writephy(tp, MII_BMCR, bmcr);
4944
4945                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4946                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4947                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4948
4949                         return err;
4950                 }
4951         } else {
4952                 u32 new_bmcr;
4953
4954                 bmcr &= ~BMCR_SPEED1000;
4955                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4956
4957                 if (tp->link_config.duplex == DUPLEX_FULL)
4958                         new_bmcr |= BMCR_FULLDPLX;
4959
4960                 if (new_bmcr != bmcr) {
4961                         /* BMCR_SPEED1000 is a reserved bit that needs
4962                          * to be set on write.
4963                          */
4964                         new_bmcr |= BMCR_SPEED1000;
4965
4966                         /* Force a linkdown */
4967                         if (netif_carrier_ok(tp->dev)) {
4968                                 u32 adv;
4969
4970                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4971                                 adv &= ~(ADVERTISE_1000XFULL |
4972                                          ADVERTISE_1000XHALF |
4973                                          ADVERTISE_SLCT);
4974                                 tg3_writephy(tp, MII_ADVERTISE, adv);
4975                                 tg3_writephy(tp, MII_BMCR, bmcr |
4976                                                            BMCR_ANRESTART |
4977                                                            BMCR_ANENABLE);
4978                                 udelay(10);
4979                                 netif_carrier_off(tp->dev);
4980                         }
4981                         tg3_writephy(tp, MII_BMCR, new_bmcr);
4982                         bmcr = new_bmcr;
4983                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4984                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4985                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4986                             ASIC_REV_5714) {
4987                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4988                                         bmsr |= BMSR_LSTATUS;
4989                                 else
4990                                         bmsr &= ~BMSR_LSTATUS;
4991                         }
4992                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4993                 }
4994         }
4995
4996         if (bmsr & BMSR_LSTATUS) {
4997                 current_speed = SPEED_1000;
4998                 current_link_up = 1;
4999                 if (bmcr & BMCR_FULLDPLX)
5000                         current_duplex = DUPLEX_FULL;
5001                 else
5002                         current_duplex = DUPLEX_HALF;
5003
5004                 local_adv = 0;
5005                 remote_adv = 0;
5006
5007                 if (bmcr & BMCR_ANENABLE) {
5008                         u32 common;
5009
5010                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5011                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5012                         common = local_adv & remote_adv;
5013                         if (common & (ADVERTISE_1000XHALF |
5014                                       ADVERTISE_1000XFULL)) {
5015                                 if (common & ADVERTISE_1000XFULL)
5016                                         current_duplex = DUPLEX_FULL;
5017                                 else
5018                                         current_duplex = DUPLEX_HALF;
5019                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5020                                 /* Link is up via parallel detect */
5021                         } else {
5022                                 current_link_up = 0;
5023                         }
5024                 }
5025         }
5026
5027         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5028                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5029
5030         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5031         if (tp->link_config.active_duplex == DUPLEX_HALF)
5032                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5033
5034         tw32_f(MAC_MODE, tp->mac_mode);
5035         udelay(40);
5036
5037         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5038
5039         tp->link_config.active_speed = current_speed;
5040         tp->link_config.active_duplex = current_duplex;
5041
5042         if (current_link_up != netif_carrier_ok(tp->dev)) {
5043                 if (current_link_up)
5044                         netif_carrier_on(tp->dev);
5045                 else {
5046                         netif_carrier_off(tp->dev);
5047                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5048                 }
5049                 tg3_link_report(tp);
5050         }
5051         return err;
5052 }
5053
5054 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5055 {
5056         if (tp->serdes_counter) {
5057                 /* Give autoneg time to complete. */
5058                 tp->serdes_counter--;
5059                 return;
5060         }
5061
5062         if (!netif_carrier_ok(tp->dev) &&
5063             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5064                 u32 bmcr;
5065
5066                 tg3_readphy(tp, MII_BMCR, &bmcr);
5067                 if (bmcr & BMCR_ANENABLE) {
5068                         u32 phy1, phy2;
5069
5070                         /* Select shadow register 0x1f */
5071                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5072                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5073
5074                         /* Select expansion interrupt status register */
5075                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5076                                          MII_TG3_DSP_EXP1_INT_STAT);
5077                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5078                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5079
5080                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5081                                 /* We have signal detect and are not receiving
5082                                  * config code words, so the link is up by
5083                                  * parallel detection.
5084                                  */
5085
5086                                 bmcr &= ~BMCR_ANENABLE;
5087                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5088                                 tg3_writephy(tp, MII_BMCR, bmcr);
5089                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5090                         }
5091                 }
5092         } else if (netif_carrier_ok(tp->dev) &&
5093                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5094                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5095                 u32 phy2;
5096
5097                 /* Select expansion interrupt status register */
5098                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5099                                  MII_TG3_DSP_EXP1_INT_STAT);
5100                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5101                 if (phy2 & 0x20) {
5102                         u32 bmcr;
5103
5104                         /* Config code words received, turn on autoneg. */
5105                         tg3_readphy(tp, MII_BMCR, &bmcr);
5106                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5107
5108                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5109
5110                 }
5111         }
5112 }
5113
5114 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5115 {
5116         u32 val;
5117         int err;
5118
5119         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5120                 err = tg3_setup_fiber_phy(tp, force_reset);
5121         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5122                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5123         else
5124                 err = tg3_setup_copper_phy(tp, force_reset);
5125
5126         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5127                 u32 scale;
5128
5129                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5130                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5131                         scale = 65;
5132                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5133                         scale = 6;
5134                 else
5135                         scale = 12;
5136
5137                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5138                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5139                 tw32(GRC_MISC_CFG, val);
5140         }
5141
5142         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5143               (6 << TX_LENGTHS_IPG_SHIFT);
5144         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5145                 val |= tr32(MAC_TX_LENGTHS) &
5146                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5147                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5148
5149         if (tp->link_config.active_speed == SPEED_1000 &&
5150             tp->link_config.active_duplex == DUPLEX_HALF)
5151                 tw32(MAC_TX_LENGTHS, val |
5152                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5153         else
5154                 tw32(MAC_TX_LENGTHS, val |
5155                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5156
5157         if (!tg3_flag(tp, 5705_PLUS)) {
5158                 if (netif_carrier_ok(tp->dev)) {
5159                         tw32(HOSTCC_STAT_COAL_TICKS,
5160                              tp->coal.stats_block_coalesce_usecs);
5161                 } else {
5162                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5163                 }
5164         }
5165
5166         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5167                 val = tr32(PCIE_PWR_MGMT_THRESH);
5168                 if (!netif_carrier_ok(tp->dev))
5169                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5170                               tp->pwrmgmt_thresh;
5171                 else
5172                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5173                 tw32(PCIE_PWR_MGMT_THRESH, val);
5174         }
5175
5176         return err;
5177 }
5178
5179 static inline int tg3_irq_sync(struct tg3 *tp)
5180 {
5181         return tp->irq_sync;
5182 }
5183
5184 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5185 {
5186         int i;
5187
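        /* Offset dst by 'off' so each register lands at the same offset
         * in the dump buffer as in the device's register space; the
         * printout in tg3_dump_state() indexes the buffer that way.
         */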
5188         dst = (u32 *)((u8 *)dst + off);
5189         for (i = 0; i < len; i += sizeof(u32))
5190                 *dst++ = tr32(off + i);
5191 }
5192
5193 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5194 {
5195         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5196         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5197         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5198         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5199         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5200         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5201         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5202         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5203         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5204         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5205         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5206         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5207         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5208         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5209         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5210         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5211         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5212         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5213         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5214
5215         if (tg3_flag(tp, SUPPORT_MSIX))
5216                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5217
5218         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5219         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5220         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5221         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5222         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5223         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5224         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5225         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5226
5227         if (!tg3_flag(tp, 5705_PLUS)) {
5228                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5229                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5230                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5231         }
5232
5233         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5234         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5235         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5236         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5237         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5238
5239         if (tg3_flag(tp, NVRAM))
5240                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5241 }
5242
5243 static void tg3_dump_state(struct tg3 *tp)
5244 {
5245         int i;
5246         u32 *regs;
5247
5248         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5249         if (!regs) {
5250                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5251                 return;
5252         }
5253
5254         if (tg3_flag(tp, PCI_EXPRESS)) {
5255                 /* Read up to but not including private PCI registers */
5256                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5257                         regs[i / sizeof(u32)] = tr32(i);
5258         } else
5259                 tg3_dump_legacy_regs(tp, regs);
5260
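        /* Print four registers per line, skipping groups that are
         * entirely zero to keep the dump short.
         */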
5261         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5262                 if (!regs[i + 0] && !regs[i + 1] &&
5263                     !regs[i + 2] && !regs[i + 3])
5264                         continue;
5265
5266                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5267                            i * 4,
5268                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5269         }
5270
5271         kfree(regs);
5272
5273         for (i = 0; i < tp->irq_cnt; i++) {
5274                 struct tg3_napi *tnapi = &tp->napi[i];
5275
5276                 /* SW status block */
5277                 netdev_err(tp->dev,
5278                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5279                            i,
5280                            tnapi->hw_status->status,
5281                            tnapi->hw_status->status_tag,
5282                            tnapi->hw_status->rx_jumbo_consumer,
5283                            tnapi->hw_status->rx_consumer,
5284                            tnapi->hw_status->rx_mini_consumer,
5285                            tnapi->hw_status->idx[0].rx_producer,
5286                            tnapi->hw_status->idx[0].tx_consumer);
5287
5288                 netdev_err(tp->dev,
5289                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5290                            i,
5291                            tnapi->last_tag, tnapi->last_irq_tag,
5292                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5293                            tnapi->rx_rcb_ptr,
5294                            tnapi->prodring.rx_std_prod_idx,
5295                            tnapi->prodring.rx_std_cons_idx,
5296                            tnapi->prodring.rx_jmb_prod_idx,
5297                            tnapi->prodring.rx_jmb_cons_idx);
5298         }
5299 }
5300
5301 /* This is called whenever we suspect that the system chipset is re-
5302  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5303  * is bogus tx completions. We try to recover by setting the
5304  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5305  * in the workqueue.
5306  */
5307 static void tg3_tx_recover(struct tg3 *tp)
5308 {
5309         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5310                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5311
5312         netdev_warn(tp->dev,
5313                     "The system may be re-ordering memory-mapped I/O "
5314                     "cycles to the network device, attempting to recover. "
5315                     "Please report the problem to the driver maintainer "
5316                     "and include system chipset information.\n");
5317
5318         spin_lock(&tp->lock);
5319         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5320         spin_unlock(&tp->lock);
5321 }
5322
5323 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5324 {
5325         /* Tell compiler to fetch tx indices from memory. */
5326         barrier();
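        /* Free slots = pending minus in-flight, where in-flight is
         * (tx_prod - tx_cons) modulo the ring size.
         */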
5327         return tnapi->tx_pending -
5328                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5329 }
5330
5331 /* Tigon3 never reports partial packet sends.  So we do not
5332  * need special logic to handle SKBs that have not had all
5333  * of their frags sent yet, the way the SunGEM driver must.
5334  */
5335 static void tg3_tx(struct tg3_napi *tnapi)
5336 {
5337         struct tg3 *tp = tnapi->tp;
5338         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5339         u32 sw_idx = tnapi->tx_cons;
5340         struct netdev_queue *txq;
5341         int index = tnapi - tp->napi;
5342
5343         if (tg3_flag(tp, ENABLE_TSS))
5344                 index--;
5345
5346         txq = netdev_get_tx_queue(tp->dev, index);
5347
5348         while (sw_idx != hw_idx) {
5349                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5350                 struct sk_buff *skb = ri->skb;
5351                 int i, tx_bug = 0;
5352
5353                 if (unlikely(skb == NULL)) {
5354                         tg3_tx_recover(tp);
5355                         return;
5356                 }
5357
5358                 pci_unmap_single(tp->pdev,
5359                                  dma_unmap_addr(ri, mapping),
5360                                  skb_headlen(skb),
5361                                  PCI_DMA_TODEVICE);
5362
5363                 ri->skb = NULL;
5364
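                /* Step past any additional descriptors this mapping
                 * occupied; ri->fragmented is presumably set when a
                 * buffer had to be split across descriptors.
                 */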
5365                 while (ri->fragmented) {
5366                         ri->fragmented = false;
5367                         sw_idx = NEXT_TX(sw_idx);
5368                         ri = &tnapi->tx_buffers[sw_idx];
5369                 }
5370
5371                 sw_idx = NEXT_TX(sw_idx);
5372
5373                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5374                         ri = &tnapi->tx_buffers[sw_idx];
5375                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5376                                 tx_bug = 1;
5377
5378                         pci_unmap_page(tp->pdev,
5379                                        dma_unmap_addr(ri, mapping),
5380                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5381                                        PCI_DMA_TODEVICE);
5382
5383                         while (ri->fragmented) {
5384                                 ri->fragmented = false;
5385                                 sw_idx = NEXT_TX(sw_idx);
5386                                 ri = &tnapi->tx_buffers[sw_idx];
5387                         }
5388
5389                         sw_idx = NEXT_TX(sw_idx);
5390                 }
5391
5392                 dev_kfree_skb(skb);
5393
5394                 if (unlikely(tx_bug)) {
5395                         tg3_tx_recover(tp);
5396                         return;
5397                 }
5398         }
5399
5400         tnapi->tx_cons = sw_idx;
5401
5402         /* Need to make the tx_cons update visible to tg3_start_xmit()
5403          * before checking for netif_queue_stopped().  Without the
5404          * memory barrier, there is a small possibility that tg3_start_xmit()
5405          * will miss it and cause the queue to be stopped forever.
5406          */
5407         smp_mb();
5408
5409         if (unlikely(netif_tx_queue_stopped(txq) &&
5410                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5411                 __netif_tx_lock(txq, smp_processor_id());
5412                 if (netif_tx_queue_stopped(txq) &&
5413                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5414                         netif_tx_wake_queue(txq);
5415                 __netif_tx_unlock(txq);
5416         }
5417 }
5418
5419 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5420 {
5421         if (!ri->skb)
5422                 return;
5423
5424         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5425                          map_sz, PCI_DMA_FROMDEVICE);
5426         dev_kfree_skb_any(ri->skb);
5427         ri->skb = NULL;
5428 }
5429
5430 /* Returns size of skb allocated or < 0 on error.
5431  *
5432  * We only need to fill in the address because the other members
5433  * of the RX descriptor are invariant, see tg3_init_rings.
5434  *
5435  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5436  * posting buffers we only dirty the first cache line of the RX
5437  * descriptor (containing the address), whereas for the RX status
5438  * buffers the cpu only reads the last cache line of the RX descriptor
5439  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5440  */
5441 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5442                             u32 opaque_key, u32 dest_idx_unmasked)
5443 {
5444         struct tg3_rx_buffer_desc *desc;
5445         struct ring_info *map;
5446         struct sk_buff *skb;
5447         dma_addr_t mapping;
5448         int skb_size, dest_idx;
5449
5450         switch (opaque_key) {
5451         case RXD_OPAQUE_RING_STD:
5452                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5453                 desc = &tpr->rx_std[dest_idx];
5454                 map = &tpr->rx_std_buffers[dest_idx];
5455                 skb_size = tp->rx_pkt_map_sz;
5456                 break;
5457
5458         case RXD_OPAQUE_RING_JUMBO:
5459                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5460                 desc = &tpr->rx_jmb[dest_idx].std;
5461                 map = &tpr->rx_jmb_buffers[dest_idx];
5462                 skb_size = TG3_RX_JMB_MAP_SZ;
5463                 break;
5464
5465         default:
5466                 return -EINVAL;
5467         }
5468
5469         /* Do not overwrite any of the map or rp information
5470          * until we are sure we can commit to a new buffer.
5471          *
5472          * Callers depend upon this behavior and assume that
5473          * we leave everything unchanged if we fail.
5474          */
5475         skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
5476         if (skb == NULL)
5477                 return -ENOMEM;
5478
5479         skb_reserve(skb, TG3_RX_OFFSET(tp));
5480
5481         mapping = pci_map_single(tp->pdev, skb->data, skb_size,
5482                                  PCI_DMA_FROMDEVICE);
5483         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5484                 dev_kfree_skb(skb);
5485                 return -EIO;
5486         }
5487
5488         map->skb = skb;
5489         dma_unmap_addr_set(map, mapping, mapping);
5490
5491         desc->addr_hi = ((u64)mapping >> 32);
5492         desc->addr_lo = ((u64)mapping & 0xffffffff);
5493
5494         return skb_size;
5495 }
5496
5497 /* We only need to copy the address over because the other
5498  * members of the RX descriptor are invariant.  See notes above
5499  * tg3_alloc_rx_skb for full details.
5500  */
5501 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5502                            struct tg3_rx_prodring_set *dpr,
5503                            u32 opaque_key, int src_idx,
5504                            u32 dest_idx_unmasked)
5505 {
5506         struct tg3 *tp = tnapi->tp;
5507         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5508         struct ring_info *src_map, *dest_map;
5509         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5510         int dest_idx;
5511
5512         switch (opaque_key) {
5513         case RXD_OPAQUE_RING_STD:
5514                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5515                 dest_desc = &dpr->rx_std[dest_idx];
5516                 dest_map = &dpr->rx_std_buffers[dest_idx];
5517                 src_desc = &spr->rx_std[src_idx];
5518                 src_map = &spr->rx_std_buffers[src_idx];
5519                 break;
5520
5521         case RXD_OPAQUE_RING_JUMBO:
5522                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5523                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5524                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5525                 src_desc = &spr->rx_jmb[src_idx].std;
5526                 src_map = &spr->rx_jmb_buffers[src_idx];
5527                 break;
5528
5529         default:
5530                 return;
5531         }
5532
5533         dest_map->skb = src_map->skb;
5534         dma_unmap_addr_set(dest_map, mapping,
5535                            dma_unmap_addr(src_map, mapping));
5536         dest_desc->addr_hi = src_desc->addr_hi;
5537         dest_desc->addr_lo = src_desc->addr_lo;
5538
5539         /* Ensure that the update to the skb happens after the physical
5540          * addresses have been transferred to the new BD location.
5541          */
5542         smp_wmb();
5543
5544         src_map->skb = NULL;
5545 }
5546
5547 /* The RX ring scheme is composed of multiple rings which post fresh
5548  * buffers to the chip, and one special ring the chip uses to report
5549  * status back to the host.
5550  *
5551  * The special ring reports the status of received packets to the
5552  * host.  The chip does not write into the original descriptor the
5553  * RX buffer was obtained from.  The chip simply takes the original
5554  * descriptor as provided by the host, updates the status and length
5555  * field, then writes this into the next status ring entry.
5556  *
5557  * Each ring the host uses to post buffers to the chip is described
5558  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5559  * it is first placed into on-chip RAM.  Once the packet's length is
5560  * known, the chip walks down the TG3_BDINFO entries to select a ring:
5561  * each TG3_BDINFO specifies a MAXLEN field, and the first entry whose
5562  * MAXLEN covers the new packet's length is chosen.
5563  *
5564  * The "separate ring for rx status" scheme may sound odd, but it makes
5565  * sense from a cache coherency perspective.  If only the host writes
5566  * to the buffer post rings, and only the chip writes to the rx status
5567  * rings, then cache lines never move beyond shared-modified state.
5568  * If both the host and chip were to write into the same ring, cache line
5569  * eviction could occur since both entities want it in an exclusive state.
5570  */
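/* Schematically (an informal sketch of the scheme described above):
 *
 *   host --(fresh empty buffers)--> std/jumbo producer rings --> chip
 *   chip --(status + length)------> rx return (status) ring ---> host
 */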
5571 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5572 {
5573         struct tg3 *tp = tnapi->tp;
5574         u32 work_mask, rx_std_posted = 0;
5575         u32 std_prod_idx, jmb_prod_idx;
5576         u32 sw_idx = tnapi->rx_rcb_ptr;
5577         u16 hw_idx;
5578         int received;
5579         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5580
5581         hw_idx = *(tnapi->rx_rcb_prod_idx);
5582         /*
5583          * We need to order the read of hw_idx and the read of
5584          * the opaque cookie.
5585          */
5586         rmb();
5587         work_mask = 0;
5588         received = 0;
5589         std_prod_idx = tpr->rx_std_prod_idx;
5590         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5591         while (sw_idx != hw_idx && budget > 0) {
5592                 struct ring_info *ri;
5593                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5594                 unsigned int len;
5595                 struct sk_buff *skb;
5596                 dma_addr_t dma_addr;
5597                 u32 opaque_key, desc_idx, *post_ptr;
5598
5599                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5600                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5601                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5602                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5603                         dma_addr = dma_unmap_addr(ri, mapping);
5604                         skb = ri->skb;
5605                         post_ptr = &std_prod_idx;
5606                         rx_std_posted++;
5607                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5608                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5609                         dma_addr = dma_unmap_addr(ri, mapping);
5610                         skb = ri->skb;
5611                         post_ptr = &jmb_prod_idx;
5612                 } else
5613                         goto next_pkt_nopost;
5614
5615                 work_mask |= opaque_key;
5616
5617                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5618                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5619                 drop_it:
5620                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5621                                        desc_idx, *post_ptr);
5622                 drop_it_no_recycle:
5623                         /* The card keeps track of other statistics. */
5624                         tp->rx_dropped++;
5625                         goto next_pkt;
5626                 }
5627
5628                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5629                       ETH_FCS_LEN;
5630
5631                 if (len > TG3_RX_COPY_THRESH(tp)) {
5632                         int skb_size;
5633
5634                         skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
5635                                                     *post_ptr);
5636                         if (skb_size < 0)
5637                                 goto drop_it;
5638
5639                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5640                                          PCI_DMA_FROMDEVICE);
5641
5642                         /* Ensure that the update to the skb happens
5643                          * after the usage of the old DMA mapping.
5644                          */
5645                         smp_wmb();
5646
5647                         ri->skb = NULL;
5648
5649                         skb_put(skb, len);
5650                 } else {
5651                         struct sk_buff *copy_skb;
5652
5653                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5654                                        desc_idx, *post_ptr);
5655
5656                         copy_skb = netdev_alloc_skb(tp->dev, len +
5657                                                     TG3_RAW_IP_ALIGN);
5658                         if (copy_skb == NULL)
5659                                 goto drop_it_no_recycle;
5660
5661                         skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
5662                         skb_put(copy_skb, len);
5663                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5664                         skb_copy_from_linear_data(skb, copy_skb->data, len);
5665                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5666
5667                         /* We'll reuse the original ring buffer. */
5668                         skb = copy_skb;
5669                 }
5670
5671                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5672                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5673                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5674                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5675                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5676                 else
5677                         skb_checksum_none_assert(skb);
5678
5679                 skb->protocol = eth_type_trans(skb, tp->dev);
5680
5681                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5682                     skb->protocol != htons(ETH_P_8021Q)) {
5683                         dev_kfree_skb(skb);
5684                         goto drop_it_no_recycle;
5685                 }
5686
5687                 if (desc->type_flags & RXD_FLAG_VLAN &&
5688                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5689                         __vlan_hwaccel_put_tag(skb,
5690                                                desc->err_vlan & RXD_VLAN_MASK);
5691
5692                 napi_gro_receive(&tnapi->napi, skb);
5693
5694                 received++;
5695                 budget--;
5696
5697 next_pkt:
5698                 (*post_ptr)++;
5699
5700                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5701                         tpr->rx_std_prod_idx = std_prod_idx &
5702                                                tp->rx_std_ring_mask;
5703                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5704                                      tpr->rx_std_prod_idx);
5705                         work_mask &= ~RXD_OPAQUE_RING_STD;
5706                         rx_std_posted = 0;
5707                 }
5708 next_pkt_nopost:
5709                 sw_idx++;
5710                 sw_idx &= tp->rx_ret_ring_mask;
5711
5712                 /* Refresh hw_idx to see if there is new work */
5713                 if (sw_idx == hw_idx) {
5714                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5715                         rmb();
5716                 }
5717         }
5718
5719         /* ACK the status ring. */
5720         tnapi->rx_rcb_ptr = sw_idx;
5721         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5722
5723         /* Refill RX ring(s). */
5724         if (!tg3_flag(tp, ENABLE_RSS)) {
5725                 if (work_mask & RXD_OPAQUE_RING_STD) {
5726                         tpr->rx_std_prod_idx = std_prod_idx &
5727                                                tp->rx_std_ring_mask;
5728                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5729                                      tpr->rx_std_prod_idx);
5730                 }
5731                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5732                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5733                                                tp->rx_jmb_ring_mask;
5734                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5735                                      tpr->rx_jmb_prod_idx);
5736                 }
5737                 mmiowb();
5738         } else if (work_mask) {
5739                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5740                  * updated before the producer indices can be updated.
5741                  */
5742                 smp_wmb();
5743
5744                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5745                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5746
5747                 if (tnapi != &tp->napi[1])
5748                         napi_schedule(&tp->napi[1].napi);
5749         }
5750
5751         return received;
5752 }
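
/* The loop above is the standard single-consumer completion-ring
 * pattern; a stripped-down sketch (illustrative only):
 *
 *      hw = *prod_ptr;
 *      rmb();
 *      while (sw != hw && budget > 0) {
 *              budget -= process(ring[sw]);
 *              sw = (sw + 1) & ring_mask;
 *              if (sw == hw) {
 *                      hw = *prod_ptr;
 *                      rmb();
 *              }
 *      }
 *
 * Each rmb() keeps the descriptor reads from being speculated ahead of
 * the producer-index read, and refreshing hw at the catch-up point
 * picks up packets that arrived while the loop was running.
 */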
5753
5754 static void tg3_poll_link(struct tg3 *tp)
5755 {
5756         /* handle link change and other phy events */
5757         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5758                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5759
5760                 if (sblk->status & SD_STATUS_LINK_CHG) {
5761                         sblk->status = SD_STATUS_UPDATED |
5762                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5763                         spin_lock(&tp->lock);
5764                         if (tg3_flag(tp, USE_PHYLIB)) {
5765                                 tw32_f(MAC_STATUS,
5766                                      (MAC_STATUS_SYNC_CHANGED |
5767                                       MAC_STATUS_CFG_CHANGED |
5768                                       MAC_STATUS_MI_COMPLETION |
5769                                       MAC_STATUS_LNKSTATE_CHANGED));
5770                                 udelay(40);
5771                         } else
5772                                 tg3_setup_phy(tp, 0);
5773                         spin_unlock(&tp->lock);
5774                 }
5775         }
5776 }
5777
5778 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5779                                 struct tg3_rx_prodring_set *dpr,
5780                                 struct tg3_rx_prodring_set *spr)
5781 {
5782         u32 si, di, cpycnt, src_prod_idx;
5783         int i, err = 0;
5784
5785         while (1) {
5786                 src_prod_idx = spr->rx_std_prod_idx;
5787
5788                 /* Make sure updates to the rx_std_buffers[] entries and the
5789                  * standard producer index are seen in the correct order.
5790                  */
5791                 smp_rmb();
5792
5793                 if (spr->rx_std_cons_idx == src_prod_idx)
5794                         break;
5795
5796                 if (spr->rx_std_cons_idx < src_prod_idx)
5797                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
5798                 else
5799                         cpycnt = tp->rx_std_ring_mask + 1 -
5800                                  spr->rx_std_cons_idx;
5801
5802                 cpycnt = min(cpycnt,
5803                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
5804
5805                 si = spr->rx_std_cons_idx;
5806                 di = dpr->rx_std_prod_idx;
5807
5808                 for (i = di; i < di + cpycnt; i++) {
5809                         if (dpr->rx_std_buffers[i].skb) {
5810                                 cpycnt = i - di;
5811                                 err = -ENOSPC;
5812                                 break;
5813                         }
5814                 }
5815
5816                 if (!cpycnt)
5817                         break;
5818
5819                 /* Ensure that updates to the rx_std_buffers ring and the
5820                  * shadowed hardware producer ring from tg3_recycle_skb() are
5821                  * ordered correctly WRT the skb check above.
5822                  */
5823                 smp_rmb();
5824
5825                 memcpy(&dpr->rx_std_buffers[di],
5826                        &spr->rx_std_buffers[si],
5827                        cpycnt * sizeof(struct ring_info));
5828
5829                 for (i = 0; i < cpycnt; i++, di++, si++) {
5830                         struct tg3_rx_buffer_desc *sbd, *dbd;
5831                         sbd = &spr->rx_std[si];
5832                         dbd = &dpr->rx_std[di];
5833                         dbd->addr_hi = sbd->addr_hi;
5834                         dbd->addr_lo = sbd->addr_lo;
5835                 }
5836
5837                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
5838                                        tp->rx_std_ring_mask;
5839                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
5840                                        tp->rx_std_ring_mask;
5841         }
5842
5843         while (1) {
5844                 src_prod_idx = spr->rx_jmb_prod_idx;
5845
5846                 /* Make sure updates to the rx_jmb_buffers[] entries and
5847                  * the jumbo producer index are seen in the correct order.
5848                  */
5849                 smp_rmb();
5850
5851                 if (spr->rx_jmb_cons_idx == src_prod_idx)
5852                         break;
5853
5854                 if (spr->rx_jmb_cons_idx < src_prod_idx)
5855                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5856                 else
5857                         cpycnt = tp->rx_jmb_ring_mask + 1 -
5858                                  spr->rx_jmb_cons_idx;
5859
5860                 cpycnt = min(cpycnt,
5861                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5862
5863                 si = spr->rx_jmb_cons_idx;
5864                 di = dpr->rx_jmb_prod_idx;
5865
5866                 for (i = di; i < di + cpycnt; i++) {
5867                         if (dpr->rx_jmb_buffers[i].skb) {
5868                                 cpycnt = i - di;
5869                                 err = -ENOSPC;
5870                                 break;
5871                         }
5872                 }
5873
5874                 if (!cpycnt)
5875                         break;
5876
5877                 /* Ensure that updates to the rx_jmb_buffers ring and the
5878                  * shadowed hardware producer ring from tg3_recycle_skb() are
5879                  * ordered correctly WRT the skb check above.
5880                  */
5881                 smp_rmb();
5882
5883                 memcpy(&dpr->rx_jmb_buffers[di],
5884                        &spr->rx_jmb_buffers[si],
5885                        cpycnt * sizeof(struct ring_info));
5886
5887                 for (i = 0; i < cpycnt; i++, di++, si++) {
5888                         struct tg3_rx_buffer_desc *sbd, *dbd;
5889                         sbd = &spr->rx_jmb[si].std;
5890                         dbd = &dpr->rx_jmb[di].std;
5891                         dbd->addr_hi = sbd->addr_hi;
5892                         dbd->addr_lo = sbd->addr_lo;
5893                 }
5894
5895                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5896                                        tp->rx_jmb_ring_mask;
5897                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5898                                        tp->rx_jmb_ring_mask;
5899         }
5900
5901         return err;
5902 }
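
/* A worked example of the copy-count math above (illustrative): with a
 * 512-entry ring (mask 0x1ff), cons_idx 0x1f0 and prod_idx 0x010, the
 * producer has wrapped, so the first pass copies 0x200 - 0x1f0 = 16
 * entries up to the end of the ring, and the next iteration of the
 * outer while (1) loop continues from index 0 with the remaining 0x10.
 * The min() also clamps the count to the end of the destination ring,
 * so each memcpy() stays linear and never wraps.
 */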
5903
5904 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5905 {
5906         struct tg3 *tp = tnapi->tp;
5907
5908         /* run TX completion thread */
5909         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5910                 tg3_tx(tnapi);
5911                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5912                         return work_done;
5913         }
5914
5915         if (!tnapi->rx_rcb_prod_idx)
5916                 return work_done;
5917
5918         /* run RX thread, within the bounds set by NAPI.
5919          * All RX "locking" is done by ensuring outside
5920          * code synchronizes with tg3->napi.poll()
5921          */
5922         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5923                 work_done += tg3_rx(tnapi, budget - work_done);
5924
5925         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
5926                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5927                 int i, err = 0;
5928                 u32 std_prod_idx = dpr->rx_std_prod_idx;
5929                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5930
5931                 for (i = 1; i < tp->irq_cnt; i++)
5932                         err |= tg3_rx_prodring_xfer(tp, dpr,
5933                                                     &tp->napi[i].prodring);
5934
5935                 wmb();
5936
5937                 if (std_prod_idx != dpr->rx_std_prod_idx)
5938                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5939                                      dpr->rx_std_prod_idx);
5940
5941                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5942                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5943                                      dpr->rx_jmb_prod_idx);
5944
5945                 mmiowb();
5946
5947                 if (err)
5948                         tw32_f(HOSTCC_MODE, tp->coal_now);
5949         }
5950
5951         return work_done;
5952 }
5953
5954 static inline void tg3_reset_task_schedule(struct tg3 *tp)
5955 {
5956         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
5957                 schedule_work(&tp->reset_task);
5958 }
5959
5960 static inline void tg3_reset_task_cancel(struct tg3 *tp)
5961 {
5962         cancel_work_sync(&tp->reset_task);
5963         tg3_flag_clear(tp, RESET_TASK_PENDING);
5964 }
5965
5966 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5967 {
5968         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5969         struct tg3 *tp = tnapi->tp;
5970         int work_done = 0;
5971         struct tg3_hw_status *sblk = tnapi->hw_status;
5972
5973         while (1) {
5974                 work_done = tg3_poll_work(tnapi, work_done, budget);
5975
5976                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
5977                         goto tx_recovery;
5978
5979                 if (unlikely(work_done >= budget))
5980                         break;
5981
5982                 /* tp->last_tag is used in tg3_int_reenable() below
5983                  * to tell the hw how much work has been processed,
5984                  * so we must read it before checking for more work.
5985                  */
5986                 tnapi->last_tag = sblk->status_tag;
5987                 tnapi->last_irq_tag = tnapi->last_tag;
5988                 rmb();
5989
5990                 /* check for RX/TX work to do */
5991                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5992                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5993                         napi_complete(napi);
5994                         /* Reenable interrupts. */
5995                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5996                         mmiowb();
5997                         break;
5998                 }
5999         }
6000
6001         return work_done;
6002
6003 tx_recovery:
6004         /* work_done is guaranteed to be less than budget. */
6005         napi_complete(napi);
6006         tg3_reset_task_schedule(tp);
6007         return work_done;
6008 }
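
/* A stripped-down sketch of the NAPI contract followed above
 * (illustrative; enable_device_irq() stands in for the tag/mailbox
 * write):
 *
 *      while (1) {
 *              work_done = do_work(work_done, budget);
 *              if (work_done >= budget)
 *                      break;
 *              if (!more_work()) {
 *                      napi_complete(napi);
 *                      enable_device_irq();
 *                      break;
 *              }
 *      }
 *      return work_done;
 *
 * Interrupts stay off while work remains; returning work_done ==
 * budget without calling napi_complete() tells NAPI to poll again.
 */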
6009
6010 static void tg3_process_error(struct tg3 *tp)
6011 {
6012         u32 val;
6013         bool real_error = false;
6014
6015         if (tg3_flag(tp, ERROR_PROCESSED))
6016                 return;
6017
6018         /* Check Flow Attention register */
6019         val = tr32(HOSTCC_FLOW_ATTN);
6020         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6021                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6022                 real_error = true;
6023         }
6024
6025         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6026                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6027                 real_error = true;
6028         }
6029
6030         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6031                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6032                 real_error = true;
6033         }
6034
6035         if (!real_error)
6036                 return;
6037
6038         tg3_dump_state(tp);
6039
6040         tg3_flag_set(tp, ERROR_PROCESSED);
6041         tg3_reset_task_schedule(tp);
6042 }
6043
6044 static int tg3_poll(struct napi_struct *napi, int budget)
6045 {
6046         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6047         struct tg3 *tp = tnapi->tp;
6048         int work_done = 0;
6049         struct tg3_hw_status *sblk = tnapi->hw_status;
6050
6051         while (1) {
6052                 if (sblk->status & SD_STATUS_ERROR)
6053                         tg3_process_error(tp);
6054
6055                 tg3_poll_link(tp);
6056
6057                 work_done = tg3_poll_work(tnapi, work_done, budget);
6058
6059                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6060                         goto tx_recovery;
6061
6062                 if (unlikely(work_done >= budget))
6063                         break;
6064
6065                 if (tg3_flag(tp, TAGGED_STATUS)) {
6066                         /* tp->last_tag is used in tg3_int_reenable() below
6067                          * to tell the hw how much work has been processed,
6068                          * so we must read it before checking for more work.
6069                          */
6070                         tnapi->last_tag = sblk->status_tag;
6071                         tnapi->last_irq_tag = tnapi->last_tag;
6072                         rmb();
6073                 } else
6074                         sblk->status &= ~SD_STATUS_UPDATED;
6075
6076                 if (likely(!tg3_has_work(tnapi))) {
6077                         napi_complete(napi);
6078                         tg3_int_reenable(tnapi);
6079                         break;
6080                 }
6081         }
6082
6083         return work_done;
6084
6085 tx_recovery:
6086         /* work_done is guaranteed to be less than budget. */
6087         napi_complete(napi);
6088         tg3_reset_task_schedule(tp);
6089         return work_done;
6090 }
6091
6092 static void tg3_napi_disable(struct tg3 *tp)
6093 {
6094         int i;
6095
6096         for (i = tp->irq_cnt - 1; i >= 0; i--)
6097                 napi_disable(&tp->napi[i].napi);
6098 }
6099
6100 static void tg3_napi_enable(struct tg3 *tp)
6101 {
6102         int i;
6103
6104         for (i = 0; i < tp->irq_cnt; i++)
6105                 napi_enable(&tp->napi[i].napi);
6106 }
6107
6108 static void tg3_napi_init(struct tg3 *tp)
6109 {
6110         int i;
6111
6112         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6113         for (i = 1; i < tp->irq_cnt; i++)
6114                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6115 }
6116
6117 static void tg3_napi_fini(struct tg3 *tp)
6118 {
6119         int i;
6120
6121         for (i = 0; i < tp->irq_cnt; i++)
6122                 netif_napi_del(&tp->napi[i].napi);
6123 }
6124
6125 static inline void tg3_netif_stop(struct tg3 *tp)
6126 {
6127         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6128         tg3_napi_disable(tp);
6129         netif_tx_disable(tp->dev);
6130 }
6131
6132 static inline void tg3_netif_start(struct tg3 *tp)
6133 {
6134         /* NOTE: unconditional netif_tx_wake_all_queues is only
6135          * appropriate so long as all callers are assured to
6136          * have free tx slots (such as after tg3_init_hw)
6137          */
6138         netif_tx_wake_all_queues(tp->dev);
6139
6140         tg3_napi_enable(tp);
6141         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6142         tg3_enable_ints(tp);
6143 }
6144
6145 static void tg3_irq_quiesce(struct tg3 *tp)
6146 {
6147         int i;
6148
6149         BUG_ON(tp->irq_sync);
6150
6151         tp->irq_sync = 1;
6152         smp_mb();
6153
6154         for (i = 0; i < tp->irq_cnt; i++)
6155                 synchronize_irq(tp->napi[i].irq_vec);
6156 }
6157
6158 /* Fully shut down all tg3 driver activity elsewhere in the system.
6159  * If irq_sync is non-zero, then the IRQ handlers must be synchronized
6160  * with as well.  Most of the time, this is not necessary except when
6161  * shutting down the device.
6162  */
6163 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6164 {
6165         spin_lock_bh(&tp->lock);
6166         if (irq_sync)
6167                 tg3_irq_quiesce(tp);
6168 }
6169
6170 static inline void tg3_full_unlock(struct tg3 *tp)
6171 {
6172         spin_unlock_bh(&tp->lock);
6173 }
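
/* Typical usage of the pair above (illustrative): configuration paths
 * that must not race with the IRQ handlers pass irq_sync = 1 so the
 * handlers are drained before hardware state is touched:
 *
 *      tg3_full_lock(tp, 1);
 *      ... halt and reprogram the chip ...
 *      tg3_full_unlock(tp);
 */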
6174
6175 /* One-shot MSI handler - the chip automatically disables the interrupt
6176  * after sending the MSI, so the driver doesn't have to do it.
6177  */
6178 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6179 {
6180         struct tg3_napi *tnapi = dev_id;
6181         struct tg3 *tp = tnapi->tp;
6182
6183         prefetch(tnapi->hw_status);
6184         if (tnapi->rx_rcb)
6185                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6186
6187         if (likely(!tg3_irq_sync(tp)))
6188                 napi_schedule(&tnapi->napi);
6189
6190         return IRQ_HANDLED;
6191 }
6192
6193 /* MSI ISR - No need to check for interrupt sharing and no need to
6194  * flush status block and interrupt mailbox. PCI ordering rules
6195  * guarantee that MSI will arrive after the status block.
6196  */
6197 static irqreturn_t tg3_msi(int irq, void *dev_id)
6198 {
6199         struct tg3_napi *tnapi = dev_id;
6200         struct tg3 *tp = tnapi->tp;
6201
6202         prefetch(tnapi->hw_status);
6203         if (tnapi->rx_rcb)
6204                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6205         /*
6206          * Writing any value to intr-mbox-0 clears PCI INTA# and
6207          * chip-internal interrupt pending events.
6208          * Writing non-zero to intr-mbox-0 additionally tells the
6209          * NIC to stop sending us irqs, engaging "in-intr-handler"
6210          * event coalescing.
6211          */
6212         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6213         if (likely(!tg3_irq_sync(tp)))
6214                 napi_schedule(&tnapi->napi);
6215
6216         return IRQ_RETVAL(1);
6217 }
6218
6219 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6220 {
6221         struct tg3_napi *tnapi = dev_id;
6222         struct tg3 *tp = tnapi->tp;
6223         struct tg3_hw_status *sblk = tnapi->hw_status;
6224         unsigned int handled = 1;
6225
6226         /* In INTx mode, it is possible for the interrupt to arrive at
6227          * the CPU before the status block that was posted prior to it.
6228          * Reading the PCI State register will confirm whether the
6229          * interrupt is ours and will flush the status block.
6230          */
6231         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6232                 if (tg3_flag(tp, CHIP_RESETTING) ||
6233                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6234                         handled = 0;
6235                         goto out;
6236                 }
6237         }
6238
6239         /*
6240          * Writing any value to intr-mbox-0 clears PCI INTA# and
6241          * chip-internal interrupt pending events.
6242          * Writing non-zero to intr-mbox-0 additionally tells the
6243          * NIC to stop sending us irqs, engaging "in-intr-handler"
6244          * event coalescing.
6245          *
6246          * Flush the mailbox to de-assert the IRQ immediately to prevent
6247          * spurious interrupts.  The flush impacts performance but
6248          * excessive spurious interrupts can be worse in some cases.
6249          */
6250         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6251         if (tg3_irq_sync(tp))
6252                 goto out;
6253         sblk->status &= ~SD_STATUS_UPDATED;
6254         if (likely(tg3_has_work(tnapi))) {
6255                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6256                 napi_schedule(&tnapi->napi);
6257         } else {
6258                 /* No work, shared interrupt perhaps?  re-enable
6259                  * interrupts, and flush that PCI write
6260                  */
6261                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6262                                0x00000000);
6263         }
6264 out:
6265         return IRQ_RETVAL(handled);
6266 }
6267
6268 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6269 {
6270         struct tg3_napi *tnapi = dev_id;
6271         struct tg3 *tp = tnapi->tp;
6272         struct tg3_hw_status *sblk = tnapi->hw_status;
6273         unsigned int handled = 1;
6274
6275         /* In INTx mode, it is possible for the interrupt to arrive at
6276          * the CPU before the status block that was posted prior to it.
6277          * Reading the PCI State register will confirm whether the
6278          * interrupt is ours and will flush the status block.
6279          */
6280         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6281                 if (tg3_flag(tp, CHIP_RESETTING) ||
6282                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6283                         handled = 0;
6284                         goto out;
6285                 }
6286         }
6287
6288         /*
6289          * Writing any value to intr-mbox-0 clears PCI INTA# and
6290          * chip-internal interrupt pending events.
6291          * Writing non-zero to intr-mbox-0 additionally tells the
6292          * NIC to stop sending us irqs, engaging "in-intr-handler"
6293          * event coalescing.
6294          *
6295          * Flush the mailbox to de-assert the IRQ immediately to prevent
6296          * spurious interrupts.  The flush impacts performance but
6297          * excessive spurious interrupts can be worse in some cases.
6298          */
6299         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6300
6301         /*
6302          * In a shared interrupt configuration, sometimes other devices'
6303          * interrupts will scream.  We record the current status tag here
6304          * so that the above check can report that the screaming interrupts
6305          * are unhandled.  Eventually they will be silenced.
6306          */
6307         tnapi->last_irq_tag = sblk->status_tag;
6308
6309         if (tg3_irq_sync(tp))
6310                 goto out;
6311
6312         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6313
6314         napi_schedule(&tnapi->napi);
6315
6316 out:
6317         return IRQ_RETVAL(handled);
6318 }
6319
6320 /* ISR for interrupt test */
6321 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6322 {
6323         struct tg3_napi *tnapi = dev_id;
6324         struct tg3 *tp = tnapi->tp;
6325         struct tg3_hw_status *sblk = tnapi->hw_status;
6326
6327         if ((sblk->status & SD_STATUS_UPDATED) ||
6328             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6329                 tg3_disable_ints(tp);
6330                 return IRQ_RETVAL(1);
6331         }
6332         return IRQ_RETVAL(0);
6333 }
6334
6335 static int tg3_init_hw(struct tg3 *, int);
6336 static int tg3_halt(struct tg3 *, int, int);
6337
6338 /* Restart hardware after configuration changes, self-test, etc.
6339  * Invoked with tp->lock held.
6340  */
6341 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
6342         __releases(tp->lock)
6343         __acquires(tp->lock)
6344 {
6345         int err;
6346
6347         err = tg3_init_hw(tp, reset_phy);
6348         if (err) {
6349                 netdev_err(tp->dev,
6350                            "Failed to re-initialize device, aborting\n");
6351                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6352                 tg3_full_unlock(tp);
6353                 del_timer_sync(&tp->timer);
6354                 tp->irq_sync = 0;
6355                 tg3_napi_enable(tp);
6356                 dev_close(tp->dev);
6357                 tg3_full_lock(tp, 0);
6358         }
6359         return err;
6360 }
6361
6362 #ifdef CONFIG_NET_POLL_CONTROLLER
6363 static void tg3_poll_controller(struct net_device *dev)
6364 {
6365         int i;
6366         struct tg3 *tp = netdev_priv(dev);
6367
6368         if (tg3_irq_sync(tp))
6369                 return;
6370
6371         for (i = 0; i < tp->irq_cnt; i++)
6372                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6373 }
6374 #endif
6375
6376 static void tg3_reset_task(struct work_struct *work)
6377 {
6378         struct tg3 *tp = container_of(work, struct tg3, reset_task);
6379         int err;
6380
6381         tg3_full_lock(tp, 0);
6382
6383         if (!netif_running(tp->dev)) {
6384                 tg3_flag_clear(tp, RESET_TASK_PENDING);
6385                 tg3_full_unlock(tp);
6386                 return;
6387         }
6388
6389         tg3_full_unlock(tp);
6390
6391         tg3_phy_stop(tp);
6392
6393         tg3_netif_stop(tp);
6394
6395         tg3_full_lock(tp, 1);
6396
6397         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
6398                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
6399                 tp->write32_rx_mbox = tg3_write_flush_reg32;
6400                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
6401                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6402         }
6403
6404         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
6405         err = tg3_init_hw(tp, 1);
6406         if (err)
6407                 goto out;
6408
6409         tg3_netif_start(tp);
6410
6411 out:
6412         tg3_full_unlock(tp);
6413
6414         if (!err)
6415                 tg3_phy_start(tp);
6416
6417         tg3_flag_clear(tp, RESET_TASK_PENDING);
6418 }
6419
6420 static void tg3_tx_timeout(struct net_device *dev)
6421 {
6422         struct tg3 *tp = netdev_priv(dev);
6423
6424         if (netif_msg_tx_err(tp)) {
6425                 netdev_err(dev, "transmit timed out, resetting\n");
6426                 tg3_dump_state(tp);
6427         }
6428
6429         tg3_reset_task_schedule(tp);
6430 }
6431
6432 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6433 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6434 {
6435         u32 base = (u32) mapping & 0xffffffff;
6436
6437         return (base > 0xffffdcc0) && (base + len + 8 < base);
6438 }
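
/* Worked example (illustrative): for a mapping at 0xfffff000 with len
 * 0x2000, base + len + 8 = 0x100001008 truncates to 0x1008, which is
 * less than base, so the buffer crosses the 4GB boundary and the
 * workaround path is taken.  The base > 0xffffdcc0 pre-check cheaply
 * excludes buffers starting more than ~9KB (a maximal frame plus
 * slop) below a boundary, which cannot wrap.
 */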
6439
6440 /* Test for DMA addresses > 40-bit */
6441 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6442                                           int len)
6443 {
6444 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6445         if (tg3_flag(tp, 40BIT_DMA_BUG))
6446                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6447         return 0;
6448 #else
6449         return 0;
6450 #endif
6451 }
6452
6453 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6454                                  dma_addr_t mapping, u32 len, u32 flags,
6455                                  u32 mss, u32 vlan)
6456 {
6457         txbd->addr_hi = ((u64) mapping >> 32);
6458         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6459         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6460         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6461 }
6462
6463 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6464                             dma_addr_t map, u32 len, u32 flags,
6465                             u32 mss, u32 vlan)
6466 {
6467         struct tg3 *tp = tnapi->tp;
6468         bool hwbug = false;
6469
6470         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6471                 hwbug = true;
6472
6473         if (tg3_4g_overflow_test(map, len))
6474                 hwbug = true;
6475
6476         if (tg3_40bit_overflow_test(tp, map, len))
6477                 hwbug = true;
6478
6479         if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
6480                 u32 prvidx = *entry;
6481                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6482                 while (len > TG3_TX_BD_DMA_MAX && *budget) {
6483                         u32 frag_len = TG3_TX_BD_DMA_MAX;
6484                         len -= TG3_TX_BD_DMA_MAX;
6485
6486                         /* Avoid the 8-byte DMA problem */
6487                         if (len <= 8) {
6488                                 len += TG3_TX_BD_DMA_MAX / 2;
6489                                 frag_len = TG3_TX_BD_DMA_MAX / 2;
6490                         }
6491
6492                         tnapi->tx_buffers[*entry].fragmented = true;
6493
6494                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6495                                       frag_len, tmp_flag, mss, vlan);
6496                         *budget -= 1;
6497                         prvidx = *entry;
6498                         *entry = NEXT_TX(*entry);
6499
6500                         map += frag_len;
6501                 }
6502
6503                 if (len) {
6504                         if (*budget) {
6505                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6506                                               len, flags, mss, vlan);
6507                                 *budget -= 1;
6508                                 *entry = NEXT_TX(*entry);
6509                         } else {
6510                                 hwbug = true;
6511                                 tnapi->tx_buffers[prvidx].fragmented = false;
6512                         }
6513                 }
6514         } else {
6515                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6516                               len, flags, mss, vlan);
6517                 *entry = NEXT_TX(*entry);
6518         }
6519
6520         return hwbug;
6521 }
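
/* Worked example of the 4K-FIFO split above (illustrative, assuming
 * TG3_TX_BD_DMA_MAX is 4096): a 9000-byte fragment goes out as two
 * 4096-byte BDs plus an 808-byte tail, safely above the 8-byte
 * short-DMA limit.  An 8200-byte fragment would leave an 8-byte tail
 * after two full BDs, so the loop instead shrinks the second BD to
 * 2048 bytes, leaving a 2056-byte tail.
 */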
6522
6523 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6524 {
6525         int i;
6526         struct sk_buff *skb;
6527         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6528
6529         skb = txb->skb;
6530         txb->skb = NULL;
6531
6532         pci_unmap_single(tnapi->tp->pdev,
6533                          dma_unmap_addr(txb, mapping),
6534                          skb_headlen(skb),
6535                          PCI_DMA_TODEVICE);
6536
6537         while (txb->fragmented) {
6538                 txb->fragmented = false;
6539                 entry = NEXT_TX(entry);
6540                 txb = &tnapi->tx_buffers[entry];
6541         }
6542
6543         for (i = 0; i <= last; i++) {
6544                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6545
6546                 entry = NEXT_TX(entry);
6547                 txb = &tnapi->tx_buffers[entry];
6548
6549                 pci_unmap_page(tnapi->tp->pdev,
6550                                dma_unmap_addr(txb, mapping),
6551                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6552
6553                 while (txb->fragmented) {
6554                         txb->fragmented = false;
6555                         entry = NEXT_TX(entry);
6556                         txb = &tnapi->tx_buffers[entry];
6557                 }
6558         }
6559 }
6560
6561 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6562 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6563                                        struct sk_buff **pskb,
6564                                        u32 *entry, u32 *budget,
6565                                        u32 base_flags, u32 mss, u32 vlan)
6566 {
6567         struct tg3 *tp = tnapi->tp;
6568         struct sk_buff *new_skb, *skb = *pskb;
6569         dma_addr_t new_addr = 0;
6570         int ret = 0;
6571
6572         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6573                 new_skb = skb_copy(skb, GFP_ATOMIC);
6574         else {
6575                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6576
6577                 new_skb = skb_copy_expand(skb,
6578                                           skb_headroom(skb) + more_headroom,
6579                                           skb_tailroom(skb), GFP_ATOMIC);
6580         }
6581
6582         if (!new_skb) {
6583                 ret = -1;
6584         } else {
6585                 /* New SKB is guaranteed to be linear. */
6586                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6587                                           PCI_DMA_TODEVICE);
6588                 /* Make sure the mapping succeeded */
6589                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6590                         dev_kfree_skb(new_skb);
6591                         ret = -1;
6592                 } else {
6593                         u32 save_entry = *entry;
6594
6595                         base_flags |= TXD_FLAG_END;
6596
6597                         tnapi->tx_buffers[*entry].skb = new_skb;
6598                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6599                                            mapping, new_addr);
6600
6601                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6602                                             new_skb->len, base_flags,
6603                                             mss, vlan)) {
6604                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6605                                 dev_kfree_skb(new_skb);
6606                                 ret = -1;
6607                         }
6608                 }
6609         }
6610
6611         dev_kfree_skb(skb);
6612         *pskb = new_skb;
6613         return ret;
6614 }
6615
6616 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6617
6618 /* Use GSO to work around a rare TSO bug that may be triggered when the
6619  * TSO header is greater than 80 bytes.
6620  */
6621 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6622 {
6623         struct sk_buff *segs, *nskb;
6624         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6625
6626         /* Estimate the number of fragments in the worst case */
6627         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6628                 netif_stop_queue(tp->dev);
6629
6630                 /* netif_tx_stop_queue() must be done before checking
6631                  * the tx index in tg3_tx_avail() below, because in
6632                  * tg3_tx(), we update tx index before checking for
6633                  * netif_tx_queue_stopped().
6634                  */
6635                 smp_mb();
6636                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6637                         return NETDEV_TX_BUSY;
6638
6639                 netif_wake_queue(tp->dev);
6640         }
6641
6642         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6643         if (IS_ERR(segs))
6644                 goto tg3_tso_bug_end;
6645
6646         do {
6647                 nskb = segs;
6648                 segs = segs->next;
6649                 nskb->next = NULL;
6650                 tg3_start_xmit(nskb, tp->dev);
6651         } while (segs);
6652
6653 tg3_tso_bug_end:
6654         dev_kfree_skb(skb);
6655
6656         return NETDEV_TX_OK;
6657 }
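
/* The gso_segs * 3 estimate above budgets, per resulting segment,
 * roughly one BD for the linear header area plus a couple of fragment
 * BDs.  Illustrative example: a TSO skb that segments into 40
 * MSS-sized packets reserves 120 free BDs before segmenting, so the
 * tg3_start_xmit() loop cannot run the ring dry halfway through the
 * segment list.
 */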
6658
6659 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6660  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6661  */
6662 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6663 {
6664         struct tg3 *tp = netdev_priv(dev);
6665         u32 len, entry, base_flags, mss, vlan = 0;
6666         u32 budget;
6667         int i = -1, would_hit_hwbug;
6668         dma_addr_t mapping;
6669         struct tg3_napi *tnapi;
6670         struct netdev_queue *txq;
6671         unsigned int last;
6672
6673         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6674         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6675         if (tg3_flag(tp, ENABLE_TSS))
6676                 tnapi++;
6677
6678         budget = tg3_tx_avail(tnapi);
6679
6680         /* We are running in BH disabled context with netif_tx_lock
6681          * and TX reclaim runs via tp->napi.poll inside of a software
6682          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6683          * no IRQ context deadlocks to worry about either.  Rejoice!
6684          */
6685         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6686                 if (!netif_tx_queue_stopped(txq)) {
6687                         netif_tx_stop_queue(txq);
6688
6689                         /* This is a hard error, log it. */
6690                         netdev_err(dev,
6691                                    "BUG! Tx Ring full when queue awake!\n");
6692                 }
6693                 return NETDEV_TX_BUSY;
6694         }
6695
6696         entry = tnapi->tx_prod;
6697         base_flags = 0;
6698         if (skb->ip_summed == CHECKSUM_PARTIAL)
6699                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6700
6701         mss = skb_shinfo(skb)->gso_size;
6702         if (mss) {
6703                 struct iphdr *iph;
6704                 u32 tcp_opt_len, hdr_len;
6705
6706                 if (skb_header_cloned(skb) &&
6707                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6708                         goto drop;
6709
6710                 iph = ip_hdr(skb);
6711                 tcp_opt_len = tcp_optlen(skb);
6712
6713                 if (skb_is_gso_v6(skb)) {
6714                         hdr_len = skb_headlen(skb) - ETH_HLEN;
6715                 } else {
6716                         u32 ip_tcp_len;
6717
6718                         ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
6719                         hdr_len = ip_tcp_len + tcp_opt_len;
6720
6721                         iph->check = 0;
6722                         iph->tot_len = htons(mss + hdr_len);
6723                 }
6724
6725                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6726                     tg3_flag(tp, TSO_BUG))
6727                         return tg3_tso_bug(tp, skb);
6728
6729                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6730                                TXD_FLAG_CPU_POST_DMA);
6731
6732                 if (tg3_flag(tp, HW_TSO_1) ||
6733                     tg3_flag(tp, HW_TSO_2) ||
6734                     tg3_flag(tp, HW_TSO_3)) {
6735                         tcp_hdr(skb)->check = 0;
6736                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6737                 } else
6738                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6739                                                                  iph->daddr, 0,
6740                                                                  IPPROTO_TCP,
6741                                                                  0);
6742
6743                 if (tg3_flag(tp, HW_TSO_3)) {
6744                         mss |= (hdr_len & 0xc) << 12;
6745                         if (hdr_len & 0x10)
6746                                 base_flags |= 0x00000010;
6747                         base_flags |= (hdr_len & 0x3e0) << 5;
6748                 } else if (tg3_flag(tp, HW_TSO_2))
6749                         mss |= hdr_len << 9;
6750                 else if (tg3_flag(tp, HW_TSO_1) ||
6751                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6752                         if (tcp_opt_len || iph->ihl > 5) {
6753                                 int tsflags;
6754
6755                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6756                                 mss |= (tsflags << 11);
6757                         }
6758                 } else {
6759                         if (tcp_opt_len || iph->ihl > 5) {
6760                                 int tsflags;
6761
6762                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6763                                 base_flags |= tsflags << 12;
6764                         }
6765                 }
6766         }
6767
6768         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6769             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6770                 base_flags |= TXD_FLAG_JMB_PKT;
6771
6772         if (vlan_tx_tag_present(skb)) {
6773                 base_flags |= TXD_FLAG_VLAN;
6774                 vlan = vlan_tx_tag_get(skb);
6775         }
6776
6777         len = skb_headlen(skb);
6778
6779         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6780         if (pci_dma_mapping_error(tp->pdev, mapping))
6781                 goto drop;
6782
6783
6784         tnapi->tx_buffers[entry].skb = skb;
6785         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6786
6787         would_hit_hwbug = 0;
6788
6789         if (tg3_flag(tp, 5701_DMA_BUG))
6790                 would_hit_hwbug = 1;
6791
6792         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6793                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6794                             mss, vlan)) {
6795                 would_hit_hwbug = 1;
6796         /* Now loop through additional data fragments, and queue them. */
6797         } else if (skb_shinfo(skb)->nr_frags > 0) {
6798                 u32 tmp_mss = mss;
6799
6800                 if (!tg3_flag(tp, HW_TSO_1) &&
6801                     !tg3_flag(tp, HW_TSO_2) &&
6802                     !tg3_flag(tp, HW_TSO_3))
6803                         tmp_mss = 0;
6804
6805                 last = skb_shinfo(skb)->nr_frags - 1;
6806                 for (i = 0; i <= last; i++) {
6807                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6808
6809                         len = skb_frag_size(frag);
6810                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6811                                                    len, DMA_TO_DEVICE);
6812
6813                         tnapi->tx_buffers[entry].skb = NULL;
6814                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6815                                            mapping);
6816                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6817                                 goto dma_error;
6818
6819                         if (!budget ||
6820                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6821                                             len, base_flags |
6822                                             ((i == last) ? TXD_FLAG_END : 0),
6823                                             tmp_mss, vlan)) {
6824                                 would_hit_hwbug = 1;
6825                                 break;
6826                         }
6827                 }
6828         }
6829
6830         if (would_hit_hwbug) {
6831                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6832
6833                 /* If the workaround fails due to memory/mapping
6834                  * failure, silently drop this packet.
6835                  */
6836                 entry = tnapi->tx_prod;
6837                 budget = tg3_tx_avail(tnapi);
6838                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6839                                                 base_flags, mss, vlan))
6840                         goto drop_nofree;
6841         }
6842
6843         skb_tx_timestamp(skb);
6844
6845         /* Packets are ready, update Tx producer idx locally and on the card. */
6846         tw32_tx_mbox(tnapi->prodmbox, entry);
6847
6848         tnapi->tx_prod = entry;
6849         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6850                 netif_tx_stop_queue(txq);
6851
6852                 /* netif_tx_stop_queue() must be done before checking
6853                  * the tx index in tg3_tx_avail() below, because in
6854                  * tg3_tx(), we update tx index before checking for
6855                  * netif_tx_queue_stopped().
6856                  */
6857                 smp_mb();
6858                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6859                         netif_tx_wake_queue(txq);
6860         }
6861
6862         mmiowb();
6863         return NETDEV_TX_OK;
6864
6865 dma_error:
6866         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6867         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6868 drop:
6869         dev_kfree_skb(skb);
6870 drop_nofree:
6871         tp->tx_dropped++;
6872         return NETDEV_TX_OK;
6873 }
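
/* A sketch of the queue stop/wake pairing used above (illustrative):
 *
 *      producer (here):                consumer (tg3_tx):
 *      netif_tx_stop_queue(txq);       update tx_cons;
 *      smp_mb();                       smp_mb();
 *      if (avail > thresh)             if (stopped && avail > thresh)
 *              netif_tx_wake_queue(txq);       netif_tx_wake_queue(txq);
 *
 * The paired barriers guarantee at least one side observes the other's
 * update, so the queue can never remain stopped while descriptors are
 * free.
 */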
6874
6875 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
6876 {
6877         if (enable) {
6878                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
6879                                   MAC_MODE_PORT_MODE_MASK);
6880
6881                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
6882
6883                 if (!tg3_flag(tp, 5705_PLUS))
6884                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
6885
6886                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
6887                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
6888                 else
6889                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
6890         } else {
6891                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
6892
6893                 if (tg3_flag(tp, 5705_PLUS) ||
6894                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
6895                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
6896                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
6897         }
6898
6899         tw32(MAC_MODE, tp->mac_mode);
6900         udelay(40);
6901 }
6902
6903 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
6904 {
6905         u32 val, bmcr, mac_mode, ptest = 0;
6906
6907         tg3_phy_toggle_apd(tp, false);
6908         tg3_phy_toggle_automdix(tp, 0);
6909
6910         if (extlpbk && tg3_phy_set_extloopbk(tp))
6911                 return -EIO;
6912
6913         bmcr = BMCR_FULLDPLX;
6914         switch (speed) {
6915         case SPEED_10:
6916                 break;
6917         case SPEED_100:
6918                 bmcr |= BMCR_SPEED100;
6919                 break;
6920         case SPEED_1000:
6921         default:
6922                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
6923                         speed = SPEED_100;
6924                         bmcr |= BMCR_SPEED100;
6925                 } else {
6926                         speed = SPEED_1000;
6927                         bmcr |= BMCR_SPEED1000;
6928                 }
6929         }
6930
6931         if (extlpbk) {
6932                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
6933                         tg3_readphy(tp, MII_CTRL1000, &val);
6934                         val |= CTL1000_AS_MASTER |
6935                                CTL1000_ENABLE_MASTER;
6936                         tg3_writephy(tp, MII_CTRL1000, val);
6937                 } else {
6938                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
6939                                 MII_TG3_FET_PTEST_TRIM_2;
6940                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
6941                 }
6942         } else
6943                 bmcr |= BMCR_LOOPBACK;
6944
6945         tg3_writephy(tp, MII_BMCR, bmcr);
6946
6947         /* The write needs to be flushed for the FETs */
6948         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
6949                 tg3_readphy(tp, MII_BMCR, &bmcr);
6950
6951         udelay(40);
6952
6953         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
6954             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
6955                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
6956                              MII_TG3_FET_PTEST_FRC_TX_LINK |
6957                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
6958
6959                 /* The write needs to be flushed for the AC131 */
6960                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
6961         }
6962
6963         /* Reset to prevent losing 1st rx packet intermittently */
6964         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
6965             tg3_flag(tp, 5780_CLASS)) {
6966                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
6967                 udelay(10);
6968                 tw32_f(MAC_RX_MODE, tp->rx_mode);
6969         }
6970
6971         mac_mode = tp->mac_mode &
6972                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
6973         if (speed == SPEED_1000)
6974                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
6975         else
6976                 mac_mode |= MAC_MODE_PORT_MODE_MII;
6977
6978         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
6979                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
6980
6981                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
6982                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
6983                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
6984                         mac_mode |= MAC_MODE_LINK_POLARITY;
6985
6986                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
6987                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
6988         }
6989
6990         tw32(MAC_MODE, mac_mode);
6991         udelay(40);
6992
6993         return 0;
6994 }
6995
6996 static void tg3_set_loopback(struct net_device *dev, u32 features)
6997 {
6998         struct tg3 *tp = netdev_priv(dev);
6999
7000         if (features & NETIF_F_LOOPBACK) {
7001                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7002                         return;
7003
7004                 spin_lock_bh(&tp->lock);
7005                 tg3_mac_loopback(tp, true);
7006                 netif_carrier_on(tp->dev);
7007                 spin_unlock_bh(&tp->lock);
7008                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7009         } else {
7010                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7011                         return;
7012
7013                 spin_lock_bh(&tp->lock);
7014                 tg3_mac_loopback(tp, false);
7015                 /* Force link status check */
7016                 tg3_setup_phy(tp, 1);
7017                 spin_unlock_bh(&tp->lock);
7018                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7019         }
7020 }
7021
7022 static u32 tg3_fix_features(struct net_device *dev, u32 features)
7023 {
7024         struct tg3 *tp = netdev_priv(dev);
7025
7026         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7027                 features &= ~NETIF_F_ALL_TSO;
7028
7029         return features;
7030 }
7031
7032 static int tg3_set_features(struct net_device *dev, u32 features)
7033 {
7034         u32 changed = dev->features ^ features;
7035
7036         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7037                 tg3_set_loopback(dev, features);
7038
7039         return 0;
7040 }
7041
7042 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
7043                                int new_mtu)
7044 {
7045         dev->mtu = new_mtu;
7046
7047         if (new_mtu > ETH_DATA_LEN) {
7048                 if (tg3_flag(tp, 5780_CLASS)) {
7049                         netdev_update_features(dev);
7050                         tg3_flag_clear(tp, TSO_CAPABLE);
7051                 } else {
7052                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
7053                 }
7054         } else {
7055                 if (tg3_flag(tp, 5780_CLASS)) {
7056                         tg3_flag_set(tp, TSO_CAPABLE);
7057                         netdev_update_features(dev);
7058                 }
7059                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
7060         }
7061 }
7062
7063 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
7064 {
7065         struct tg3 *tp = netdev_priv(dev);
7066         int err;
7067
7068         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
7069                 return -EINVAL;
7070
7071         if (!netif_running(dev)) {
7072                 /* Nothing to do now; the new MTU simply takes
7073                  * effect the next time the device is brought up.
7074                  */
7075                 tg3_set_mtu(dev, tp, new_mtu);
7076                 return 0;
7077         }
7078
7079         tg3_phy_stop(tp);
7080
7081         tg3_netif_stop(tp);
7082
7083         tg3_full_lock(tp, 1);
7084
7085         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
7086
7087         tg3_set_mtu(dev, tp, new_mtu);
7088
7089         err = tg3_restart_hw(tp, 0);
7090
7091         if (!err)
7092                 tg3_netif_start(tp);
7093
7094         tg3_full_unlock(tp);
7095
7096         if (!err)
7097                 tg3_phy_start(tp);
7098
7099         return err;
7100 }
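/* An illustrative sketch, not driver code: tg3_change_mtu() above is
 * one instance of the generic stop -> halt -> modify -> restart
 * pattern this driver uses for any parameter the hardware latches at
 * init time.  The hypothetical wrapper below distills that shape
 * using only helpers that already exist in this file.
 */
static int example_reconfigure(struct tg3 *tp, void (*change)(struct tg3 *))
{
        int err;

        tg3_phy_stop(tp);
        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);
        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        change(tp);                     /* apply the new parameter */

        err = tg3_restart_hw(tp, 0);
        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

        return err;
}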
7101
7102 static void tg3_rx_prodring_free(struct tg3 *tp,
7103                                  struct tg3_rx_prodring_set *tpr)
7104 {
7105         int i;
7106
7107         if (tpr != &tp->napi[0].prodring) {
7108                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7109                      i = (i + 1) & tp->rx_std_ring_mask)
7110                         tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7111                                         tp->rx_pkt_map_sz);
7112
7113                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7114                         for (i = tpr->rx_jmb_cons_idx;
7115                              i != tpr->rx_jmb_prod_idx;
7116                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7117                                 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7118                                                 TG3_RX_JMB_MAP_SZ);
7119                         }
7120                 }
7121
7122                 return;
7123         }
7124
7125         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7126                 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
7127                                 tp->rx_pkt_map_sz);
7128
7129         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7130                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7131                         tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
7132                                         TG3_RX_JMB_MAP_SZ);
7133         }
7134 }
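/* An illustrative sketch of the index arithmetic above: the rings
 * are power-of-two sized, so masking with (size - 1) wraps an index
 * without a modulo.  Walking i from the consumer to the producer
 * index this way visits exactly the occupied entries.
 */
static inline u32 example_ring_next(u32 i, u32 mask)
{
        /* mask must be ring_size - 1, with ring_size a power of two */
        return (i + 1) & mask;
}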
7135
7136 /* Initialize rx rings for packet processing.
7137  *
7138  * The chip has been shut down and the driver detached from
7139  * the networking stack, so no interrupts or new tx packets will
7140  * end up in the driver.  tp->{tx,}lock are held and thus
7141  * we may not sleep.
7142  */
7143 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7144                                  struct tg3_rx_prodring_set *tpr)
7145 {
7146         u32 i, rx_pkt_dma_sz;
7147
7148         tpr->rx_std_cons_idx = 0;
7149         tpr->rx_std_prod_idx = 0;
7150         tpr->rx_jmb_cons_idx = 0;
7151         tpr->rx_jmb_prod_idx = 0;
7152
7153         if (tpr != &tp->napi[0].prodring) {
7154                 memset(&tpr->rx_std_buffers[0], 0,
7155                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7156                 if (tpr->rx_jmb_buffers)
7157                         memset(&tpr->rx_jmb_buffers[0], 0,
7158                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7159                 goto done;
7160         }
7161
7162         /* Zero out all descriptors. */
7163         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7164
7165         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7166         if (tg3_flag(tp, 5780_CLASS) &&
7167             tp->dev->mtu > ETH_DATA_LEN)
7168                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7169         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7170
7171         /* Initialize invariants of the rings; we only set this
7172          * stuff once.  This works because the card does not
7173          * write into the rx buffer posting rings.
7174          */
7175         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7176                 struct tg3_rx_buffer_desc *rxd;
7177
7178                 rxd = &tpr->rx_std[i];
7179                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7180                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7181                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7182                                (i << RXD_OPAQUE_INDEX_SHIFT));
7183         }
7184
7185         /* Now allocate fresh SKBs for each rx ring. */
7186         for (i = 0; i < tp->rx_pending; i++) {
7187                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7188                         netdev_warn(tp->dev,
7189                                     "Using a smaller RX standard ring. Only "
7190                                     "%d out of %d buffers were allocated "
7191                                     "successfully\n", i, tp->rx_pending);
7192                         if (i == 0)
7193                                 goto initfail;
7194                         tp->rx_pending = i;
7195                         break;
7196                 }
7197         }
7198
7199         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7200                 goto done;
7201
7202         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7203
7204         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7205                 goto done;
7206
7207         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7208                 struct tg3_rx_buffer_desc *rxd;
7209
7210                 rxd = &tpr->rx_jmb[i].std;
7211                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7212                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7213                                   RXD_FLAG_JUMBO;
7214                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7215                        (i << RXD_OPAQUE_INDEX_SHIFT));
7216         }
7217
7218         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7219                 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7220                         netdev_warn(tp->dev,
7221                                     "Using a smaller RX jumbo ring. Only %d "
7222                                     "out of %d buffers were allocated "
7223                                     "successfully\n", i, tp->rx_jumbo_pending);
7224                         if (i == 0)
7225                                 goto initfail;
7226                         tp->rx_jumbo_pending = i;
7227                         break;
7228                 }
7229         }
7230
7231 done:
7232         return 0;
7233
7234 initfail:
7235         tg3_rx_prodring_free(tp, tpr);
7236         return -ENOMEM;
7237 }
7238
7239 static void tg3_rx_prodring_fini(struct tg3 *tp,
7240                                  struct tg3_rx_prodring_set *tpr)
7241 {
7242         kfree(tpr->rx_std_buffers);
7243         tpr->rx_std_buffers = NULL;
7244         kfree(tpr->rx_jmb_buffers);
7245         tpr->rx_jmb_buffers = NULL;
7246         if (tpr->rx_std) {
7247                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7248                                   tpr->rx_std, tpr->rx_std_mapping);
7249                 tpr->rx_std = NULL;
7250         }
7251         if (tpr->rx_jmb) {
7252                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7253                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7254                 tpr->rx_jmb = NULL;
7255         }
7256 }
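/* Note: the helper above is deliberately safe on partially built
 * state -- kfree(NULL) is a no-op, the DMA frees are guarded, and
 * every pointer is cleared after being released -- which is what
 * lets the err_out path below call it unconditionally.
 */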
7257
7258 static int tg3_rx_prodring_init(struct tg3 *tp,
7259                                 struct tg3_rx_prodring_set *tpr)
7260 {
7261         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7262                                       GFP_KERNEL);
7263         if (!tpr->rx_std_buffers)
7264                 return -ENOMEM;
7265
7266         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7267                                          TG3_RX_STD_RING_BYTES(tp),
7268                                          &tpr->rx_std_mapping,
7269                                          GFP_KERNEL);
7270         if (!tpr->rx_std)
7271                 goto err_out;
7272
7273         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7274                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7275                                               GFP_KERNEL);
7276                 if (!tpr->rx_jmb_buffers)
7277                         goto err_out;
7278
7279                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7280                                                  TG3_RX_JMB_RING_BYTES(tp),
7281                                                  &tpr->rx_jmb_mapping,
7282                                                  GFP_KERNEL);
7283                 if (!tpr->rx_jmb)
7284                         goto err_out;
7285         }
7286
7287         return 0;
7288
7289 err_out:
7290         tg3_rx_prodring_fini(tp, tpr);
7291         return -ENOMEM;
7292 }
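/* Minimal sketch of the unwind idiom used above (hypothetical struct
 * and sizes): zero the state first, jump to a single label on any
 * failure, and let a fini helper that tolerates NULL pointers free
 * whatever was actually allocated.
 */
struct example_bufs {
        void *a;
        void *b;
};

static void example_bufs_fini(struct example_bufs *eb)
{
        kfree(eb->a);           /* kfree(NULL) is a no-op */
        kfree(eb->b);
        eb->a = eb->b = NULL;
}

static int example_bufs_init(struct example_bufs *eb)
{
        memset(eb, 0, sizeof(*eb));

        eb->a = kzalloc(64, GFP_KERNEL);
        if (!eb->a)
                goto err_out;

        eb->b = kzalloc(64, GFP_KERNEL);
        if (!eb->b)
                goto err_out;

        return 0;

err_out:
        example_bufs_fini(eb);
        return -ENOMEM;
}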
7293
7294 /* Free up pending packets in all rx/tx rings.
7295  *
7296  * The chip has been shut down and the driver detached from
7297  * the networking stack, so no interrupts or new tx packets will
7298  * end up in the driver.  tp->{tx,}lock is not held and we are not
7299  * in an interrupt context and thus may sleep.
7300  */
7301 static void tg3_free_rings(struct tg3 *tp)
7302 {
7303         int i, j;
7304
7305         for (j = 0; j < tp->irq_cnt; j++) {
7306                 struct tg3_napi *tnapi = &tp->napi[j];
7307
7308                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7309
7310                 if (!tnapi->tx_buffers)
7311                         continue;
7312
7313                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7314                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7315
7316                         if (!skb)
7317                                 continue;
7318
7319                         tg3_tx_skb_unmap(tnapi, i,
7320                                          skb_shinfo(skb)->nr_frags - 1);
7321
7322                         dev_kfree_skb_any(skb);
7323                 }
7324         }
7325 }
7326
7327 /* Initialize tx/rx rings for packet processing.
7328  *
7329  * The chip has been shut down and the driver detached from
7330  * the networking stack, so no interrupts or new tx packets will
7331  * end up in the driver.  tp->{tx,}lock are held and thus
7332  * we may not sleep.
7333  */
7334 static int tg3_init_rings(struct tg3 *tp)
7335 {
7336         int i;
7337
7338         /* Free up all the SKBs. */
7339         tg3_free_rings(tp);
7340
7341         for (i = 0; i < tp->irq_cnt; i++) {
7342                 struct tg3_napi *tnapi = &tp->napi[i];
7343
7344                 tnapi->last_tag = 0;
7345                 tnapi->last_irq_tag = 0;
7346                 tnapi->hw_status->status = 0;
7347                 tnapi->hw_status->status_tag = 0;
7348                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7349
7350                 tnapi->tx_prod = 0;
7351                 tnapi->tx_cons = 0;
7352                 if (tnapi->tx_ring)
7353                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7354
7355                 tnapi->rx_rcb_ptr = 0;
7356                 if (tnapi->rx_rcb)
7357                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7358
7359                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7360                         tg3_free_rings(tp);
7361                         return -ENOMEM;
7362                 }
7363         }
7364
7365         return 0;
7366 }
7367
7368 /*
7369  * Must not be invoked with interrupt sources disabled and
7370  * the hardware shut down.
7371  */
7372 static void tg3_free_consistent(struct tg3 *tp)
7373 {
7374         int i;
7375
7376         for (i = 0; i < tp->irq_cnt; i++) {
7377                 struct tg3_napi *tnapi = &tp->napi[i];
7378
7379                 if (tnapi->tx_ring) {
7380                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7381                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7382                         tnapi->tx_ring = NULL;
7383                 }
7384
7385                 kfree(tnapi->tx_buffers);
7386                 tnapi->tx_buffers = NULL;
7387
7388                 if (tnapi->rx_rcb) {
7389                         dma_free_coherent(&tp->pdev->dev,
7390                                           TG3_RX_RCB_RING_BYTES(tp),
7391                                           tnapi->rx_rcb,
7392                                           tnapi->rx_rcb_mapping);
7393                         tnapi->rx_rcb = NULL;
7394                 }
7395
7396                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7397
7398                 if (tnapi->hw_status) {
7399                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7400                                           tnapi->hw_status,
7401                                           tnapi->status_mapping);
7402                         tnapi->hw_status = NULL;
7403                 }
7404         }
7405
7406         if (tp->hw_stats) {
7407                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7408                                   tp->hw_stats, tp->stats_mapping);
7409                 tp->hw_stats = NULL;
7410         }
7411 }
7412
7413 /*
7414  * Must not be invoked with interrupt sources disabled and
7415  * the hardware shut down.  Can sleep.
7416  */
7417 static int tg3_alloc_consistent(struct tg3 *tp)
7418 {
7419         int i;
7420
7421         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7422                                           sizeof(struct tg3_hw_stats),
7423                                           &tp->stats_mapping,
7424                                           GFP_KERNEL);
7425         if (!tp->hw_stats)
7426                 goto err_out;
7427
7428         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7429
7430         for (i = 0; i < tp->irq_cnt; i++) {
7431                 struct tg3_napi *tnapi = &tp->napi[i];
7432                 struct tg3_hw_status *sblk;
7433
7434                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7435                                                       TG3_HW_STATUS_SIZE,
7436                                                       &tnapi->status_mapping,
7437                                                       GFP_KERNEL);
7438                 if (!tnapi->hw_status)
7439                         goto err_out;
7440
7441                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7442                 sblk = tnapi->hw_status;
7443
7444                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7445                         goto err_out;
7446
7447                 /* If multivector TSS is enabled, vector 0 does not handle
7448                  * tx interrupts.  Don't allocate any resources for it.
7449                  */
7450                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7451                     (i && tg3_flag(tp, ENABLE_TSS))) {
7452                         tnapi->tx_buffers = kzalloc(
7453                                                sizeof(struct tg3_tx_ring_info) *
7454                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7455                         if (!tnapi->tx_buffers)
7456                                 goto err_out;
7457
7458                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7459                                                             TG3_TX_RING_BYTES,
7460                                                         &tnapi->tx_desc_mapping,
7461                                                             GFP_KERNEL);
7462                         if (!tnapi->tx_ring)
7463                                 goto err_out;
7464                 }
7465
7466                 /*
7467                  * When RSS is enabled, the status block format changes
7468                  * slightly.  The "rx_jumbo_consumer", "reserved",
7469                  * and "rx_mini_consumer" members get mapped to the
7470                  * other three rx return ring producer indexes.
7471                  */
7472                 switch (i) {
7473                 default:
7474                         if (tg3_flag(tp, ENABLE_RSS)) {
7475                                 tnapi->rx_rcb_prod_idx = NULL;
7476                                 break;
7477                         }
7478                         /* Fall through */
7479                 case 1:
7480                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7481                         break;
7482                 case 2:
7483                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7484                         break;
7485                 case 3:
7486                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7487                         break;
7488                 case 4:
7489                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7490                         break;
7491                 }
7492
7493                 /*
7494                  * If multivector RSS is enabled, vector 0 does not handle
7495                  * rx or tx interrupts.  Don't allocate any resources for it.
7496                  */
7497                 if (!i && tg3_flag(tp, ENABLE_RSS))
7498                         continue;
7499
7500                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7501                                                    TG3_RX_RCB_RING_BYTES(tp),
7502                                                    &tnapi->rx_rcb_mapping,
7503                                                    GFP_KERNEL);
7504                 if (!tnapi->rx_rcb)
7505                         goto err_out;
7506
7507                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7508         }
7509
7510         return 0;
7511
7512 err_out:
7513         tg3_free_consistent(tp);
7514         return -ENOMEM;
7515 }
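/* Sketch of the coherent-DMA pattern used throughout the function
 * above (hypothetical ring type): dma_alloc_coherent() returns the
 * CPU virtual address and fills in the bus address handed to the
 * NIC; both must be kept so dma_free_coherent() can undo the
 * mapping later.
 */
struct example_dma_ring {
        void            *desc;          /* CPU view of the memory */
        dma_addr_t      mapping;        /* device (bus) address */
};

static int example_dma_ring_alloc(struct device *dev,
                                  struct example_dma_ring *r, size_t bytes)
{
        r->desc = dma_alloc_coherent(dev, bytes, &r->mapping, GFP_KERNEL);
        if (!r->desc)
                return -ENOMEM;
        memset(r->desc, 0, bytes);      /* hardware expects zeroed rings */
        return 0;
}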
7516
7517 #define MAX_WAIT_CNT 1000
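/* With the 100 usec poll interval used below, MAX_WAIT_CNT bounds
 * each block stop at roughly 100 ms.
 */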
7518
7519 /* To stop a block, clear the enable bit and poll until it
7520  * clears.  tp->lock is held.
7521  */
7522 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7523 {
7524         unsigned int i;
7525         u32 val;
7526
7527         if (tg3_flag(tp, 5705_PLUS)) {
7528                 switch (ofs) {
7529                 case RCVLSC_MODE:
7530                 case DMAC_MODE:
7531                 case MBFREE_MODE:
7532                 case BUFMGR_MODE:
7533                 case MEMARB_MODE:
7534                         /* We can't enable/disable these bits of the
7535                          * 5705/5750, so just report success.
7536                          */
7537                         return 0;
7538
7539                 default:
7540                         break;
7541                 }
7542         }
7543
7544         val = tr32(ofs);
7545         val &= ~enable_bit;
7546         tw32_f(ofs, val);
7547
7548         for (i = 0; i < MAX_WAIT_CNT; i++) {
7549                 udelay(100);
7550                 val = tr32(ofs);
7551                 if ((val & enable_bit) == 0)
7552                         break;
7553         }
7554
7555         if (i == MAX_WAIT_CNT && !silent) {
7556                 dev_err(&tp->pdev->dev,
7557                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7558                         ofs, enable_bit);
7559                 return -ENODEV;
7560         }
7561
7562         return 0;
7563 }
7564
7565 /* tp->lock is held. */
7566 static int tg3_abort_hw(struct tg3 *tp, int silent)
7567 {
7568         int i, err;
7569
7570         tg3_disable_ints(tp);
7571
7572         tp->rx_mode &= ~RX_MODE_ENABLE;
7573         tw32_f(MAC_RX_MODE, tp->rx_mode);
7574         udelay(10);
7575
7576         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7577         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7578         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7579         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7580         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7581         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7582
7583         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7584         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7585         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7586         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7587         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7588         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7589         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7590
7591         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7592         tw32_f(MAC_MODE, tp->mac_mode);
7593         udelay(40);
7594
7595         tp->tx_mode &= ~TX_MODE_ENABLE;
7596         tw32_f(MAC_TX_MODE, tp->tx_mode);
7597
7598         for (i = 0; i < MAX_WAIT_CNT; i++) {
7599                 udelay(100);
7600                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7601                         break;
7602         }
7603         if (i >= MAX_WAIT_CNT) {
7604                 dev_err(&tp->pdev->dev,
7605                         "%s timed out, TX_MODE_ENABLE will not clear "
7606                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7607                 err |= -ENODEV;
7608         }
7609
7610         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7611         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7612         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7613
7614         tw32(FTQ_RESET, 0xffffffff);
7615         tw32(FTQ_RESET, 0x00000000);
7616
7617         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7618         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7619
7620         for (i = 0; i < tp->irq_cnt; i++) {
7621                 struct tg3_napi *tnapi = &tp->napi[i];
7622                 if (tnapi->hw_status)
7623                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7624         }
7625         if (tp->hw_stats)
7626                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7627
7628         return err;
7629 }
7630
7631 /* Save PCI command register before chip reset */
7632 static void tg3_save_pci_state(struct tg3 *tp)
7633 {
7634         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7635 }
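/* Only PCI_COMMAND needs a fresh save here; the other values that
 * tg3_restore_pci_state() rewrites (misc host control, cache line
 * size, latency timer, read request size) were cached in *tp at
 * probe time, so no fresh read is needed before the reset.
 */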
7636
7637 /* Restore PCI state after chip reset */
7638 static void tg3_restore_pci_state(struct tg3 *tp)
7639 {
7640         u32 val;
7641
7642         /* Re-enable indirect register accesses. */
7643         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7644                                tp->misc_host_ctrl);
7645
7646         /* Set MAX PCI retry to zero. */
7647         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7648         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7649             tg3_flag(tp, PCIX_MODE))
7650                 val |= PCISTATE_RETRY_SAME_DMA;
7651         /* Allow reads and writes to the APE register and memory space. */
7652         if (tg3_flag(tp, ENABLE_APE))
7653                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7654                        PCISTATE_ALLOW_APE_SHMEM_WR |
7655                        PCISTATE_ALLOW_APE_PSPACE_WR;
7656         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7657
7658         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7659
7660         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
7661                 if (tg3_flag(tp, PCI_EXPRESS))
7662                         pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7663                 else {
7664                         pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7665                                               tp->pci_cacheline_sz);
7666                         pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7667                                               tp->pci_lat_timer);
7668                 }
7669         }
7670
7671         /* Make sure the PCI-X relaxed ordering bit is clear. */
7672         if (tg3_flag(tp, PCIX_MODE)) {
7673                 u16 pcix_cmd;
7674
7675                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7676                                      &pcix_cmd);
7677                 pcix_cmd &= ~PCI_X_CMD_ERO;
7678                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7679                                       pcix_cmd);
7680         }
7681
7682         if (tg3_flag(tp, 5780_CLASS)) {
7683
7684                 /* Chip reset on 5780 will reset MSI enable bit,
7685                  * so need to restore it.
7686                  */
7687                 if (tg3_flag(tp, USING_MSI)) {
7688                         u16 ctrl;
7689
7690                         pci_read_config_word(tp->pdev,
7691                                              tp->msi_cap + PCI_MSI_FLAGS,
7692                                              &ctrl);
7693                         pci_write_config_word(tp->pdev,
7694                                               tp->msi_cap + PCI_MSI_FLAGS,
7695                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7696                         val = tr32(MSGINT_MODE);
7697                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7698                 }
7699         }
7700 }
7701
7702 /* tp->lock is held. */
7703 static int tg3_chip_reset(struct tg3 *tp)
7704 {
7705         u32 val;
7706         void (*write_op)(struct tg3 *, u32, u32);
7707         int i, err;
7708
7709         tg3_nvram_lock(tp);
7710
7711         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7712
7713         /* No matching tg3_nvram_unlock() after this because
7714          * chip reset below will undo the nvram lock.
7715          */
7716         tp->nvram_lock_cnt = 0;
7717
7718         /* GRC_MISC_CFG core clock reset will clear the memory
7719          * enable bit in PCI register 4 and the MSI enable bit
7720          * on some chips, so we save relevant registers here.
7721          */
7722         tg3_save_pci_state(tp);
7723
7724         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7725             tg3_flag(tp, 5755_PLUS))
7726                 tw32(GRC_FASTBOOT_PC, 0);
7727
7728         /*
7729          * We must avoid the readl() that normally takes place.
7730          * It can lock up machines, cause machine checks, and do
7731          * other fun things.  So temporarily disable the 5701
7732          * hardware workaround while we do the reset.
7733          */
7734         write_op = tp->write32;
7735         if (write_op == tg3_write_flush_reg32)
7736                 tp->write32 = tg3_write32;
7737
7738         /* Prevent the irq handler from reading or writing PCI registers
7739          * during chip reset when the memory enable bit in the PCI command
7740          * register may be cleared.  The chip does not generate interrupts
7741          * at this time, but the irq handler may still be called due to irq
7742          * sharing or irqpoll.
7743          */
7744         tg3_flag_set(tp, CHIP_RESETTING);
7745         for (i = 0; i < tp->irq_cnt; i++) {
7746                 struct tg3_napi *tnapi = &tp->napi[i];
7747                 if (tnapi->hw_status) {
7748                         tnapi->hw_status->status = 0;
7749                         tnapi->hw_status->status_tag = 0;
7750                 }
7751                 tnapi->last_tag = 0;
7752                 tnapi->last_irq_tag = 0;
7753         }
7754         smp_mb();
7755
7756         for (i = 0; i < tp->irq_cnt; i++)
7757                 synchronize_irq(tp->napi[i].irq_vec);
7758
7759         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7760                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7761                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7762         }
7763
7764         /* do the reset */
7765         val = GRC_MISC_CFG_CORECLK_RESET;
7766
7767         if (tg3_flag(tp, PCI_EXPRESS)) {
7768                 /* Force PCIe 1.0a mode */
7769                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7770                     !tg3_flag(tp, 57765_PLUS) &&
7771                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7772                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7773                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7774
7775                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7776                         tw32(GRC_MISC_CFG, (1 << 29));
7777                         val |= (1 << 29);
7778                 }
7779         }
7780
7781         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7782                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7783                 tw32(GRC_VCPU_EXT_CTRL,
7784                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7785         }
7786
7787         /* Manage gphy power for all CPMU-absent PCIe devices. */
7788         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7789                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7790
7791         tw32(GRC_MISC_CFG, val);
7792
7793         /* restore 5701 hardware bug workaround write method */
7794         tp->write32 = write_op;
7795
7796         /* Unfortunately, we have to delay before the PCI read back.
7797          * Some 575X chips will not even respond to a PCI cfg access
7798          * when the reset command is given to the chip.
7799          *
7800          * How do these hardware designers expect things to work
7801          * properly if the PCI write is posted for a long period
7802          * of time?  It is always necessary to have some method by
7803          * which a register read back can occur to push out the
7804          * write that does the reset.
7805          *
7806          * For most tg3 variants the trick below was working.
7807          * Ho hum...
7808          */
7809         udelay(120);
7810
7811         /* Flush PCI posted writes.  The normal MMIO registers
7812          * are inaccessible at this time, so this is the only
7813          * way to do this reliably (actually, this is no longer
7814          * the case, see above).  I tried to use indirect
7815          * register read/write but this upset some 5701 variants.
7816          */
7817         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7818
7819         udelay(120);
7820
7821         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7822                 u16 val16;
7823
7824                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7825                         int i;
7826                         u32 cfg_val;
7827
7828                         /* Wait for link training to complete.  */
7829                         for (i = 0; i < 5000; i++)
7830                                 udelay(100);
7831
7832                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7833                         pci_write_config_dword(tp->pdev, 0xc4,
7834                                                cfg_val | (1 << 15));
7835                 }
7836
7837                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7838                 pci_read_config_word(tp->pdev,
7839                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7840                                      &val16);
7841                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7842                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7843                 /*
7844                  * Older PCIe devices only support the 128 byte
7845                  * MPS setting.  Enforce the restriction.
7846                  */
7847                 if (!tg3_flag(tp, CPMU_PRESENT))
7848                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7849                 pci_write_config_word(tp->pdev,
7850                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7851                                       val16);
7852
7853                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7854
7855                 /* Clear error status */
7856                 pci_write_config_word(tp->pdev,
7857                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7858                                       PCI_EXP_DEVSTA_CED |
7859                                       PCI_EXP_DEVSTA_NFED |
7860                                       PCI_EXP_DEVSTA_FED |
7861                                       PCI_EXP_DEVSTA_URD);
7862         }
7863
7864         tg3_restore_pci_state(tp);
7865
7866         tg3_flag_clear(tp, CHIP_RESETTING);
7867         tg3_flag_clear(tp, ERROR_PROCESSED);
7868
7869         val = 0;
7870         if (tg3_flag(tp, 5780_CLASS))
7871                 val = tr32(MEMARB_MODE);
7872         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7873
7874         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7875                 tg3_stop_fw(tp);
7876                 tw32(0x5000, 0x400);
7877         }
7878
7879         tw32(GRC_MODE, tp->grc_mode);
7880
7881         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7882                 val = tr32(0xc4);
7883
7884                 tw32(0xc4, val | (1 << 15));
7885         }
7886
7887         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7888             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7889                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7890                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7891                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7892                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7893         }
7894
7895         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7896                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7897                 val = tp->mac_mode;
7898         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7899                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7900                 val = tp->mac_mode;
7901         } else
7902                 val = 0;
7903
7904         tw32_f(MAC_MODE, val);
7905         udelay(40);
7906
7907         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7908
7909         err = tg3_poll_fw(tp);
7910         if (err)
7911                 return err;
7912
7913         tg3_mdio_start(tp);
7914
7915         if (tg3_flag(tp, PCI_EXPRESS) &&
7916             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7917             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7918             !tg3_flag(tp, 57765_PLUS)) {
7919                 val = tr32(0x7c00);
7920
7921                 tw32(0x7c00, val | (1 << 25));
7922         }
7923
7924         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7925                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7926                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7927         }
7928
7929         /* Reprobe ASF enable state.  */
7930         tg3_flag_clear(tp, ENABLE_ASF);
7931         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7932         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7933         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7934                 u32 nic_cfg;
7935
7936                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7937                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7938                         tg3_flag_set(tp, ENABLE_ASF);
7939                         tp->last_event_jiffies = jiffies;
7940                         if (tg3_flag(tp, 5750_PLUS))
7941                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7942                 }
7943         }
7944
7945         return 0;
7946 }
7947
7948 /* tp->lock is held. */
7949 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7950 {
7951         int err;
7952
7953         tg3_stop_fw(tp);
7954
7955         tg3_write_sig_pre_reset(tp, kind);
7956
7957         tg3_abort_hw(tp, silent);
7958         err = tg3_chip_reset(tp);
7959
7960         __tg3_set_mac_addr(tp, 0);
7961
7962         tg3_write_sig_legacy(tp, kind);
7963         tg3_write_sig_post_reset(tp, kind);
7964
7965         if (err)
7966                 return err;
7967
7968         return 0;
7969 }
7970
7971 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7972 {
7973         struct tg3 *tp = netdev_priv(dev);
7974         struct sockaddr *addr = p;
7975         int err = 0, skip_mac_1 = 0;
7976
7977         if (!is_valid_ether_addr(addr->sa_data))
7978                 return -EINVAL;
7979
7980         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7981
7982         if (!netif_running(dev))
7983                 return 0;
7984
7985         if (tg3_flag(tp, ENABLE_ASF)) {
7986                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7987
7988                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7989                 addr0_low = tr32(MAC_ADDR_0_LOW);
7990                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7991                 addr1_low = tr32(MAC_ADDR_1_LOW);
7992
7993                 /* Skip MAC addr 1 if ASF is using it. */
7994                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7995                     !(addr1_high == 0 && addr1_low == 0))
7996                         skip_mac_1 = 1;
7997         }
7998         spin_lock_bh(&tp->lock);
7999         __tg3_set_mac_addr(tp, skip_mac_1);
8000         spin_unlock_bh(&tp->lock);
8001
8002         return err;
8003 }
8004
8005 /* tp->lock is held. */
8006 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8007                            dma_addr_t mapping, u32 maxlen_flags,
8008                            u32 nic_addr)
8009 {
8010         tg3_write_mem(tp,
8011                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8012                       ((u64) mapping >> 32));
8013         tg3_write_mem(tp,
8014                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8015                       ((u64) mapping & 0xffffffff));
8016         tg3_write_mem(tp,
8017                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8018                        maxlen_flags);
8019
8020         if (!tg3_flag(tp, 5705_PLUS))
8021                 tg3_write_mem(tp,
8022                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8023                               nic_addr);
8024 }
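/* Sketch: the BD info block stores the 64-bit host DMA address as
 * two 32-bit words, high half first, exactly as split above.  The
 * (u64) cast keeps the shift well-defined on 32-bit dma_addr_t.
 */
static void example_split_dma_addr(dma_addr_t mapping, u32 *hi, u32 *lo)
{
        *hi = (u32)((u64)mapping >> 32);
        *lo = (u32)((u64)mapping & 0xffffffff);
}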
8025
8026 static void __tg3_set_rx_mode(struct net_device *);
8027 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8028 {
8029         int i;
8030
8031         if (!tg3_flag(tp, ENABLE_TSS)) {
8032                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8033                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8034                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8035         } else {
8036                 tw32(HOSTCC_TXCOL_TICKS, 0);
8037                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8038                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8039         }
8040
8041         if (!tg3_flag(tp, ENABLE_RSS)) {
8042                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8043                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8044                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8045         } else {
8046                 tw32(HOSTCC_RXCOL_TICKS, 0);
8047                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8048                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8049         }
8050
8051         if (!tg3_flag(tp, 5705_PLUS)) {
8052                 u32 val = ec->stats_block_coalesce_usecs;
8053
8054                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8055                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8056
8057                 if (!netif_carrier_ok(tp->dev))
8058                         val = 0;
8059
8060                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8061         }
8062
8063         for (i = 0; i < tp->irq_cnt - 1; i++) {
8064                 u32 reg;
8065
8066                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8067                 tw32(reg, ec->rx_coalesce_usecs);
8068                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8069                 tw32(reg, ec->rx_max_coalesced_frames);
8070                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8071                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8072
8073                 if (tg3_flag(tp, ENABLE_TSS)) {
8074                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8075                         tw32(reg, ec->tx_coalesce_usecs);
8076                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8077                         tw32(reg, ec->tx_max_coalesced_frames);
8078                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8079                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8080                 }
8081         }
8082
8083         for (; i < tp->irq_max - 1; i++) {
8084                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8085                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8086                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8087
8088                 if (tg3_flag(tp, ENABLE_TSS)) {
8089                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8090                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8091                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8092                 }
8093         }
8094 }
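/* Sketch of the register layout the two loops above rely on: the
 * per-vector host-coalescing registers sit at a fixed 0x18-byte
 * stride from the VEC1 base, so vector v (v >= 1) is addressed as
 * shown in this illustrative helper.
 */
static u32 example_vec_reg(u32 vec1_base, unsigned int v)
{
        return vec1_base + (v - 1) * 0x18;
}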
8095
8096 /* tp->lock is held. */
8097 static void tg3_rings_reset(struct tg3 *tp)
8098 {
8099         int i;
8100         u32 stblk, txrcb, rxrcb, limit;
8101         struct tg3_napi *tnapi = &tp->napi[0];
8102
8103         /* Disable all transmit rings but the first. */
8104         if (!tg3_flag(tp, 5705_PLUS))
8105                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8106         else if (tg3_flag(tp, 5717_PLUS))
8107                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8108         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8109                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8110         else
8111                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8112
8113         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8114              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8115                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8116                               BDINFO_FLAGS_DISABLED);
8117
8118
8119         /* Disable all receive return rings but the first. */
8120         if (tg3_flag(tp, 5717_PLUS))
8121                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8122         else if (!tg3_flag(tp, 5705_PLUS))
8123                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8124         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8125                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8126                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8127         else
8128                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8129
8130         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8131              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8132                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8133                               BDINFO_FLAGS_DISABLED);
8134
8135         /* Disable interrupts */
8136         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8137         tp->napi[0].chk_msi_cnt = 0;
8138         tp->napi[0].last_rx_cons = 0;
8139         tp->napi[0].last_tx_cons = 0;
8140
8141         /* Zero mailbox registers. */
8142         if (tg3_flag(tp, SUPPORT_MSIX)) {
8143                 for (i = 1; i < tp->irq_max; i++) {
8144                         tp->napi[i].tx_prod = 0;
8145                         tp->napi[i].tx_cons = 0;
8146                         if (tg3_flag(tp, ENABLE_TSS))
8147                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8148                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8149                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8150                         tp->napi[i].chk_msi_cnt = 0;
8151                         tp->napi[i].last_rx_cons = 0;
8152                         tp->napi[i].last_tx_cons = 0;
8153                 }
8154                 if (!tg3_flag(tp, ENABLE_TSS))
8155                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8156         } else {
8157                 tp->napi[0].tx_prod = 0;
8158                 tp->napi[0].tx_cons = 0;
8159                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8160                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8161         }
8162
8163         /* Make sure the NIC-based send BD rings are disabled. */
8164         if (!tg3_flag(tp, 5705_PLUS)) {
8165                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8166                 for (i = 0; i < 16; i++)
8167                         tw32_tx_mbox(mbox + i * 8, 0);
8168         }
8169
8170         txrcb = NIC_SRAM_SEND_RCB;
8171         rxrcb = NIC_SRAM_RCV_RET_RCB;
8172
8173         /* Clear status block in ram. */
8174         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8175
8176         /* Set status block DMA address */
8177         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8178              ((u64) tnapi->status_mapping >> 32));
8179         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8180              ((u64) tnapi->status_mapping & 0xffffffff));
8181
8182         if (tnapi->tx_ring) {
8183                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8184                                (TG3_TX_RING_SIZE <<
8185                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8186                                NIC_SRAM_TX_BUFFER_DESC);
8187                 txrcb += TG3_BDINFO_SIZE;
8188         }
8189
8190         if (tnapi->rx_rcb) {
8191                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8192                                (tp->rx_ret_ring_mask + 1) <<
8193                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8194                 rxrcb += TG3_BDINFO_SIZE;
8195         }
8196
8197         stblk = HOSTCC_STATBLCK_RING1;
8198
8199         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8200                 u64 mapping = (u64)tnapi->status_mapping;
8201                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8202                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8203
8204                 /* Clear status block in ram. */
8205                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8206
8207                 if (tnapi->tx_ring) {
8208                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8209                                        (TG3_TX_RING_SIZE <<
8210                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8211                                        NIC_SRAM_TX_BUFFER_DESC);
8212                         txrcb += TG3_BDINFO_SIZE;
8213                 }
8214
8215                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8216                                ((tp->rx_ret_ring_mask + 1) <<
8217                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8218
8219                 stblk += 8;
8220                 rxrcb += TG3_BDINFO_SIZE;
8221         }
8222 }
8223
8224 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8225 {
8226         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8227
8228         if (!tg3_flag(tp, 5750_PLUS) ||
8229             tg3_flag(tp, 5780_CLASS) ||
8230             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8231             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8232                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8233         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8234                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8235                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8236         else
8237                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8238
8239         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8240         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8241
8242         val = min(nic_rep_thresh, host_rep_thresh);
8243         tw32(RCVBDI_STD_THRESH, val);
8244
8245         if (tg3_flag(tp, 57765_PLUS))
8246                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8247
8248         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8249                 return;
8250
8251         if (!tg3_flag(tp, 5705_PLUS))
8252                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8253         else
8254                 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
8255
8256         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8257
8258         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8259         tw32(RCVBDI_JUMBO_THRESH, val);
8260
8261         if (tg3_flag(tp, 57765_PLUS))
8262                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8263 }
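/* Worked example with illustrative numbers: rx_pending == 200 gives
 * a host-side threshold of max(200 / 8, 1) == 25 buffers, and the
 * value actually programmed is the smaller of that and half the
 * NIC's on-chip BD cache, so the programmed watermark never exceeds
 * what the BD cache can hold.
 */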
8264
8265 /* tp->lock is held. */
8266 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8267 {
8268         u32 val, rdmac_mode;
8269         int i, err, limit;
8270         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8271
8272         tg3_disable_ints(tp);
8273
8274         tg3_stop_fw(tp);
8275
8276         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8277
8278         if (tg3_flag(tp, INIT_COMPLETE))
8279                 tg3_abort_hw(tp, 1);
8280
8281         /* Enable MAC control of LPI */
8282         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8283                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8284                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8285                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8286
8287                 tw32_f(TG3_CPMU_EEE_CTRL,
8288                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8289
8290                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8291                       TG3_CPMU_EEEMD_LPI_IN_TX |
8292                       TG3_CPMU_EEEMD_LPI_IN_RX |
8293                       TG3_CPMU_EEEMD_EEE_ENABLE;
8294
8295                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8296                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8297
8298                 if (tg3_flag(tp, ENABLE_APE))
8299                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8300
8301                 tw32_f(TG3_CPMU_EEE_MODE, val);
8302
8303                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8304                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8305                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8306
8307                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8308                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8309                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8310         }
8311
8312         if (reset_phy)
8313                 tg3_phy_reset(tp);
8314
8315         err = tg3_chip_reset(tp);
8316         if (err)
8317                 return err;
8318
8319         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8320
8321         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8322                 val = tr32(TG3_CPMU_CTRL);
8323                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8324                 tw32(TG3_CPMU_CTRL, val);
8325
8326                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8327                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8328                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8329                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8330
8331                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8332                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8333                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8334                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8335
8336                 val = tr32(TG3_CPMU_HST_ACC);
8337                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8338                 val |= CPMU_HST_ACC_MACCLK_6_25;
8339                 tw32(TG3_CPMU_HST_ACC, val);
8340         }
8341
8342         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8343                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8344                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8345                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8346                 tw32(PCIE_PWR_MGMT_THRESH, val);
8347
8348                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8349                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8350
8351                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8352
8353                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8354                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8355         }
8356
8357         if (tg3_flag(tp, L1PLLPD_EN)) {
8358                 u32 grc_mode = tr32(GRC_MODE);
8359
8360                 /* Access the lower 1K of PL PCIE block registers. */
8361                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8362                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8363
8364                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8365                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8366                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8367
8368                 tw32(GRC_MODE, grc_mode);
8369         }
8370
8371         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
8372                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8373                         u32 grc_mode = tr32(GRC_MODE);
8374
8375                         /* Access the lower 1K of PL PCIE block registers. */
8376                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8377                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8378
8379                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8380                                    TG3_PCIE_PL_LO_PHYCTL5);
8381                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8382                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8383
8384                         tw32(GRC_MODE, grc_mode);
8385                 }
8386
8387                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8388                         u32 grc_mode = tr32(GRC_MODE);
8389
8390                         /* Access the lower 1K of DL PCIE block registers. */
8391                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8392                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8393
8394                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8395                                    TG3_PCIE_DL_LO_FTSMAX);
8396                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8397                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8398                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8399
8400                         tw32(GRC_MODE, grc_mode);
8401                 }
8402
8403                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8404                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8405                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8406                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8407         }
8408
8409         /* This works around an issue with Athlon chipsets on
8410          * B3 tigon3 silicon.  This bit has no effect on any
8411          * other revision.  But do not set this on PCI Express
8412          * chips and don't even touch the clocks if the CPMU is present.
8413          */
8414         if (!tg3_flag(tp, CPMU_PRESENT)) {
8415                 if (!tg3_flag(tp, PCI_EXPRESS))
8416                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8417                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8418         }
8419
8420         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8421             tg3_flag(tp, PCIX_MODE)) {
8422                 val = tr32(TG3PCI_PCISTATE);
8423                 val |= PCISTATE_RETRY_SAME_DMA;
8424                 tw32(TG3PCI_PCISTATE, val);
8425         }
8426
8427         if (tg3_flag(tp, ENABLE_APE)) {
8428                 /* Allow reads and writes to the
8429                  * APE register and memory space.
8430                  */
8431                 val = tr32(TG3PCI_PCISTATE);
8432                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8433                        PCISTATE_ALLOW_APE_SHMEM_WR |
8434                        PCISTATE_ALLOW_APE_PSPACE_WR;
8435                 tw32(TG3PCI_PCISTATE, val);
8436         }
8437
8438         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8439                 /* Enable some hw fixes.  */
8440                 val = tr32(TG3PCI_MSI_DATA);
8441                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8442                 tw32(TG3PCI_MSI_DATA, val);
8443         }
8444
8445         /* Descriptor ring init may access the NIC SRAM
8446          * area to set up the TX descriptors, so we
8447          * can only do this after the hardware has been
8448          * successfully reset.
8449          */
8450         err = tg3_init_rings(tp);
8451         if (err)
8452                 return err;
8453
8454         if (tg3_flag(tp, 57765_PLUS)) {
8455                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8456                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8457                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8458                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8459                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8460                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8461                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8462                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8463         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8464                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8465                 /* This value is determined during the probe time DMA
8466                  * engine test, tg3_test_dma.
8467                  */
8468                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8469         }
8470
8471         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8472                           GRC_MODE_4X_NIC_SEND_RINGS |
8473                           GRC_MODE_NO_TX_PHDR_CSUM |
8474                           GRC_MODE_NO_RX_PHDR_CSUM);
8475         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8476
8477         /* Pseudo-header checksum is done by hardware logic and not
8478          * the offload processors, so make the chip do the pseudo-
8479          * header checksums on receive.  For transmit it is more
8480          * convenient to do the pseudo-header checksum in software,
8481          * as Linux does that on transmit for us in all cases.
8482          */
8483         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
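        /* For reference: the pseudo-header covered by this checksum is the
         * standard RFC 793 one -- source and destination IP addresses, a
         * zero byte, the protocol number, and the TCP/UDP length -- summed
         * as 16-bit words along with the rest of the segment.  Roughly:
         *
         *      csum = fold16(sum16(saddr) + sum16(daddr) + proto + len)
         *
         * (illustrative notation only; these are not helpers from this
         * driver).
         */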
8484
8485         tw32(GRC_MODE,
8486              tp->grc_mode |
8487              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8488
8489         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
8490         val = tr32(GRC_MISC_CFG);
8491         val &= ~0xff;
8492         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8493         tw32(GRC_MISC_CFG, val);
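        /* Sanity check on the value above: assuming the usual
         * divide-by-(N + 1) prescaler convention, programming 65 yields
         * 66 MHz / (65 + 1) = 1 MHz, i.e. one timer tick per microsecond.
         */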
8494
8495         /* Initialize MBUF/DESC pool. */
8496         if (tg3_flag(tp, 5750_PLUS)) {
8497                 /* Do nothing.  */
8498         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8499                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8500                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8501                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8502                 else
8503                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8504                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8505                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8506         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8507                 int fw_len;
8508
8509                 fw_len = tp->fw_len;
8510                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8511                 tw32(BUFMGR_MB_POOL_ADDR,
8512                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8513                 tw32(BUFMGR_MB_POOL_SIZE,
8514                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8515         }
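        /* The (fw_len + (0x80 - 1)) & ~(0x80 - 1) expression above is the
         * usual round-up idiom: e.g. a firmware length of 0x1234 bytes
         * becomes 0x1280, the next multiple of 128, so the mbuf pool starts
         * on a 128-byte boundary after the firmware image.
         */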
8516
8517         if (tp->dev->mtu <= ETH_DATA_LEN) {
8518                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8519                      tp->bufmgr_config.mbuf_read_dma_low_water);
8520                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8521                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8522                 tw32(BUFMGR_MB_HIGH_WATER,
8523                      tp->bufmgr_config.mbuf_high_water);
8524         } else {
8525                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8526                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8527                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8528                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8529                 tw32(BUFMGR_MB_HIGH_WATER,
8530                      tp->bufmgr_config.mbuf_high_water_jumbo);
8531         }
8532         tw32(BUFMGR_DMA_LOW_WATER,
8533              tp->bufmgr_config.dma_low_water);
8534         tw32(BUFMGR_DMA_HIGH_WATER,
8535              tp->bufmgr_config.dma_high_water);
8536
8537         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8538         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8539                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8540         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8541             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8542             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8543                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8544         tw32(BUFMGR_MODE, val);
8545         for (i = 0; i < 2000; i++) {
8546                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8547                         break;
8548                 udelay(10);
8549         }
8550         if (i >= 2000) {
8551                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8552                 return -ENODEV;
8553         }
8554
8555         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8556                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8557
8558         tg3_setup_rxbd_thresholds(tp);
8559
8560         /* Initialize TG3_BDINFO's at:
8561          *  RCVDBDI_STD_BD:     standard eth size rx ring
8562          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8563          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8564          *
8565          * like so:
8566          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8567          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8568          *                              ring attribute flags
8569          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8570          *
8571          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8572          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8573          *
8574          * The size of each ring is fixed in the firmware, but the location is
8575          * configurable.
8576          */
8577         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8578              ((u64) tpr->rx_std_mapping >> 32));
8579         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8580              ((u64) tpr->rx_std_mapping & 0xffffffff));
8581         if (!tg3_flag(tp, 5717_PLUS))
8582                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8583                      NIC_SRAM_RX_BUFFER_DESC);
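        /* Viewed as a struct, each TG3_BDINFO block programmed above and
         * below would look like this (illustrative only; no such struct
         * exists in this driver):
         *
         *      struct tg3_bdinfo {
         *              u32 host_addr_hi;       TG3_64BIT_REG_HIGH
         *              u32 host_addr_lo;       TG3_64BIT_REG_LOW
         *              u32 maxlen_flags;       (rx max buffer size << 16) | flags
         *              u32 nic_addr;           descriptor location in NIC SRAM
         *      };
         */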
8584
8585         /* Disable the mini ring */
8586         if (!tg3_flag(tp, 5705_PLUS))
8587                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8588                      BDINFO_FLAGS_DISABLED);
8589
8590         /* Program the jumbo buffer descriptor ring control
8591          * blocks on those devices that have them.
8592          */
8593         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8594             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8595
8596                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8597                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8598                              ((u64) tpr->rx_jmb_mapping >> 32));
8599                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8600                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8601                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8602                               BDINFO_FLAGS_MAXLEN_SHIFT;
8603                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8604                              val | BDINFO_FLAGS_USE_EXT_RECV);
8605                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8606                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8607                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8608                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8609                 } else {
8610                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8611                              BDINFO_FLAGS_DISABLED);
8612                 }
8613
8614                 if (tg3_flag(tp, 57765_PLUS)) {
8615                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8616                                 val = TG3_RX_STD_MAX_SIZE_5700;
8617                         else
8618                                 val = TG3_RX_STD_MAX_SIZE_5717;
8619                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8620                         val |= (TG3_RX_STD_DMA_SZ << 2);
8621                 } else
8622                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8623         } else
8624                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8625
8626         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8627
8628         tpr->rx_std_prod_idx = tp->rx_pending;
8629         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8630
8631         tpr->rx_jmb_prod_idx =
8632                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8633         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8634
8635         tg3_rings_reset(tp);
8636
8637         /* Initialize MAC address and backoff seed. */
8638         __tg3_set_mac_addr(tp, 0);
8639
8640         /* MTU + ethernet header + FCS + optional VLAN tag */
8641         tw32(MAC_RX_MTU_SIZE,
8642              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
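        /* With the default 1500-byte MTU this programs
         * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522
         * bytes, the classic maximum size of a VLAN-tagged frame.
         */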
8643
8644         /* The slot time is changed by tg3_setup_phy if we
8645          * run at gigabit with half duplex.
8646          */
8647         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8648               (6 << TX_LENGTHS_IPG_SHIFT) |
8649               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8650
8651         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8652                 val |= tr32(MAC_TX_LENGTHS) &
8653                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8654                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8655
8656         tw32(MAC_TX_LENGTHS, val);
8657
8658         /* Receive rules. */
8659         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8660         tw32(RCVLPC_CONFIG, 0x0181);
8661
8662         /* Calculate the RDMAC_MODE setting early; we need it to
8663          * determine the RCVLPC_STATE_ENABLE mask.
8664          */
8665         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8666                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8667                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8668                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8669                       RDMAC_MODE_LNGREAD_ENAB);
8670
8671         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8672                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8673
8674         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8675             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8676             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8677                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8678                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8679                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8680
8681         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8682             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8683                 if (tg3_flag(tp, TSO_CAPABLE) &&
8684                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8685                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8686                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8687                            !tg3_flag(tp, IS_5788)) {
8688                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8689                 }
8690         }
8691
8692         if (tg3_flag(tp, PCI_EXPRESS))
8693                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8694
8695         if (tg3_flag(tp, HW_TSO_1) ||
8696             tg3_flag(tp, HW_TSO_2) ||
8697             tg3_flag(tp, HW_TSO_3))
8698                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8699
8700         if (tg3_flag(tp, 57765_PLUS) ||
8701             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8702             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8703                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8704
8705         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8706                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8707
8708         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8709             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8710             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8711             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8712             tg3_flag(tp, 57765_PLUS)) {
8713                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8714                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8715                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8716                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8717                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8718                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8719                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8720                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8721                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8722                 }
8723                 tw32(TG3_RDMA_RSRVCTRL_REG,
8724                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8725         }
8726
8727         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8728             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8729                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8730                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8731                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8732                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8733         }
8734
8735         /* Receive/send statistics. */
8736         if (tg3_flag(tp, 5750_PLUS)) {
8737                 val = tr32(RCVLPC_STATS_ENABLE);
8738                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8739                 tw32(RCVLPC_STATS_ENABLE, val);
8740         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8741                    tg3_flag(tp, TSO_CAPABLE)) {
8742                 val = tr32(RCVLPC_STATS_ENABLE);
8743                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8744                 tw32(RCVLPC_STATS_ENABLE, val);
8745         } else {
8746                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8747         }
8748         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8749         tw32(SNDDATAI_STATSENAB, 0xffffff);
8750         tw32(SNDDATAI_STATSCTRL,
8751              (SNDDATAI_SCTRL_ENABLE |
8752               SNDDATAI_SCTRL_FASTUPD));
8753
8754         /* Setup host coalescing engine. */
8755         tw32(HOSTCC_MODE, 0);
8756         for (i = 0; i < 2000; i++) {
8757                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8758                         break;
8759                 udelay(10);
8760         }
8761
8762         __tg3_set_coalesce(tp, &tp->coal);
8763
8764         if (!tg3_flag(tp, 5705_PLUS)) {
8765                 /* Status/statistics block address.  See tg3_timer,
8766                  * the tg3_periodic_fetch_stats call there, and
8767                  * tg3_get_stats to see how this works for 5705/5750 chips.
8768                  */
8769                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8770                      ((u64) tp->stats_mapping >> 32));
8771                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8772                      ((u64) tp->stats_mapping & 0xffffffff));
8773                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8774
8775                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8776
8777                 /* Clear statistics and status block memory areas */
8778                 for (i = NIC_SRAM_STATS_BLK;
8779                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8780                      i += sizeof(u32)) {
8781                         tg3_write_mem(tp, i, 0);
8782                         udelay(40);
8783                 }
8784         }
8785
8786         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8787
8788         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8789         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8790         if (!tg3_flag(tp, 5705_PLUS))
8791                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8792
8793         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8794                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8795                 /* reset to prevent losing 1st rx packet intermittently */
8796                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8797                 udelay(10);
8798         }
8799
8800         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8801                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8802                         MAC_MODE_FHDE_ENABLE;
8803         if (tg3_flag(tp, ENABLE_APE))
8804                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8805         if (!tg3_flag(tp, 5705_PLUS) &&
8806             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8807             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8808                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8809         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8810         udelay(40);
8811
8812         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8813          * If TG3_FLAG_IS_NIC is zero, we should read the
8814          * register to preserve the GPIO settings for LOMs. The GPIOs,
8815          * whether used as inputs or outputs, are set by boot code after
8816          * reset.
8817          */
8818         if (!tg3_flag(tp, IS_NIC)) {
8819                 u32 gpio_mask;
8820
8821                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8822                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8823                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8824
8825                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8826                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8827                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8828
8829                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8830                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8831
8832                 tp->grc_local_ctrl &= ~gpio_mask;
8833                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8834
8835                 /* GPIO1 must be driven high for EEPROM write protect */
8836                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8837                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8838                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8839         }
8840         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8841         udelay(100);
8842
8843         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8844                 val = tr32(MSGINT_MODE);
8845                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8846                 if (!tg3_flag(tp, 1SHOT_MSI))
8847                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
8848                 tw32(MSGINT_MODE, val);
8849         }
8850
8851         if (!tg3_flag(tp, 5705_PLUS)) {
8852                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8853                 udelay(40);
8854         }
8855
8856         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8857                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8858                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8859                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8860                WDMAC_MODE_LNGREAD_ENAB);
8861
8862         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8863             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8864                 if (tg3_flag(tp, TSO_CAPABLE) &&
8865                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8866                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8867                         /* nothing */
8868                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8869                            !tg3_flag(tp, IS_5788)) {
8870                         val |= WDMAC_MODE_RX_ACCEL;
8871                 }
8872         }
8873
8874         /* Enable host coalescing bug fix */
8875         if (tg3_flag(tp, 5755_PLUS))
8876                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8877
8878         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8879                 val |= WDMAC_MODE_BURST_ALL_DATA;
8880
8881         tw32_f(WDMAC_MODE, val);
8882         udelay(40);
8883
8884         if (tg3_flag(tp, PCIX_MODE)) {
8885                 u16 pcix_cmd;
8886
8887                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8888                                      &pcix_cmd);
8889                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8890                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8891                         pcix_cmd |= PCI_X_CMD_READ_2K;
8892                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8893                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8894                         pcix_cmd |= PCI_X_CMD_READ_2K;
8895                 }
8896                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8897                                       pcix_cmd);
8898         }
8899
8900         tw32_f(RDMAC_MODE, rdmac_mode);
8901         udelay(40);
8902
8903         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8904         if (!tg3_flag(tp, 5705_PLUS))
8905                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8906
8907         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8908                 tw32(SNDDATAC_MODE,
8909                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8910         else
8911                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8912
8913         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8914         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8915         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8916         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8917                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8918         tw32(RCVDBDI_MODE, val);
8919         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8920         if (tg3_flag(tp, HW_TSO_1) ||
8921             tg3_flag(tp, HW_TSO_2) ||
8922             tg3_flag(tp, HW_TSO_3))
8923                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8924         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8925         if (tg3_flag(tp, ENABLE_TSS))
8926                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8927         tw32(SNDBDI_MODE, val);
8928         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8929
8930         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8931                 err = tg3_load_5701_a0_firmware_fix(tp);
8932                 if (err)
8933                         return err;
8934         }
8935
8936         if (tg3_flag(tp, TSO_CAPABLE)) {
8937                 err = tg3_load_tso_firmware(tp);
8938                 if (err)
8939                         return err;
8940         }
8941
8942         tp->tx_mode = TX_MODE_ENABLE;
8943
8944         if (tg3_flag(tp, 5755_PLUS) ||
8945             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8946                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8947
8948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8949                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8950                 tp->tx_mode &= ~val;
8951                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8952         }
8953
8954         tw32_f(MAC_TX_MODE, tp->tx_mode);
8955         udelay(100);
8956
8957         if (tg3_flag(tp, ENABLE_RSS)) {
8958                 int i = 0;
8959                 u32 reg = MAC_RSS_INDIR_TBL_0;
8960
8961                 if (tp->irq_cnt == 2) {
8962                         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
8963                                 tw32(reg, 0x0);
8964                                 reg += 4;
8965                         }
8966                 } else {
8967                         u32 val;
8968
8969                         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8970                                 val = i % (tp->irq_cnt - 1);
8971                                 i++;
8972                                 for (; i % 8; i++) {
8973                                         val <<= 4;
8974                                         val |= (i % (tp->irq_cnt - 1));
8975                                 }
8976                                 tw32(reg, val);
8977                                 reg += 4;
8978                         }
8979                 }
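                /* The loop above packs eight 4-bit ring indices into each
                 * 32-bit register, assigned round-robin across the rx rings
                 * (vector 0 handles only link events).  For example, with
                 * tp->irq_cnt == 5 the first register is written as
                 * 0x01230123: table entries 0..7 map to rings 0,1,2,3,0,1,2,3.
                 */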
8980
8981                 /* Set up the "secret" hash key. */
8982                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8983                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8984                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8985                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8986                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8987                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8988                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8989                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8990                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8991                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8992         }
8993
8994         tp->rx_mode = RX_MODE_ENABLE;
8995         if (tg3_flag(tp, 5755_PLUS))
8996                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8997
8998         if (tg3_flag(tp, ENABLE_RSS))
8999                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9000                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9001                                RX_MODE_RSS_IPV6_HASH_EN |
9002                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9003                                RX_MODE_RSS_IPV4_HASH_EN |
9004                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9005
9006         tw32_f(MAC_RX_MODE, tp->rx_mode);
9007         udelay(10);
9008
9009         tw32(MAC_LED_CTRL, tp->led_ctrl);
9010
9011         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9012         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9013                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9014                 udelay(10);
9015         }
9016         tw32_f(MAC_RX_MODE, tp->rx_mode);
9017         udelay(10);
9018
9019         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9020                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9021                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9022                         /* Set drive transmission level to 1.2V  */
9023                         /* only if the signal pre-emphasis bit is not set  */
9024                         val = tr32(MAC_SERDES_CFG);
9025                         val &= 0xfffff000;
9026                         val |= 0x880;
9027                         tw32(MAC_SERDES_CFG, val);
9028                 }
9029                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9030                         tw32(MAC_SERDES_CFG, 0x616000);
9031         }
9032
9033         /* Prevent chip from dropping frames when flow control
9034          * is enabled.
9035          */
9036         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9037                 val = 1;
9038         else
9039                 val = 2;
9040         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9041
9042         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9043             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9044                 /* Use hardware link auto-negotiation */
9045                 tg3_flag_set(tp, HW_AUTONEG);
9046         }
9047
9048         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9049             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9050                 u32 tmp;
9051
9052                 tmp = tr32(SERDES_RX_CTRL);
9053                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9054                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9055                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9056                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9057         }
9058
9059         if (!tg3_flag(tp, USE_PHYLIB)) {
9060                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
9061                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9062                         tp->link_config.speed = tp->link_config.orig_speed;
9063                         tp->link_config.duplex = tp->link_config.orig_duplex;
9064                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
9065                 }
9066
9067                 err = tg3_setup_phy(tp, 0);
9068                 if (err)
9069                         return err;
9070
9071                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9072                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9073                         u32 tmp;
9074
9075                         /* Clear CRC stats. */
9076                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9077                                 tg3_writephy(tp, MII_TG3_TEST1,
9078                                              tmp | MII_TG3_TEST1_CRC_EN);
9079                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9080                         }
9081                 }
9082         }
9083
9084         __tg3_set_rx_mode(tp->dev);
9085
9086         /* Initialize receive rules. */
9087         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9088         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9089         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9090         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9091
9092         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9093                 limit = 8;
9094         else
9095                 limit = 16;
9096         if (tg3_flag(tp, ENABLE_ASF))
9097                 limit -= 4;
9098         switch (limit) {
9099         case 16:
9100                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9101         case 15:
9102                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9103         case 14:
9104                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9105         case 13:
9106                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9107         case 12:
9108                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9109         case 11:
9110                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9111         case 10:
9112                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9113         case 9:
9114                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9115         case 8:
9116                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9117         case 7:
9118                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9119         case 6:
9120                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9121         case 5:
9122                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9123         case 4:
9124                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9125         case 3:
9126                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9127         case 2:
9128         case 1:
9129
9130         default:
9131                 break;
9132         }
9133
9134         if (tg3_flag(tp, ENABLE_APE))
9135                 /* Write our heartbeat update interval to APE. */
9136                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9137                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9138
9139         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9140
9141         return 0;
9142 }
9143
9144 /* Called at device open time to get the chip ready for
9145  * packet processing.  Invoked with tp->lock held.
9146  */
9147 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9148 {
9149         tg3_switch_clocks(tp);
9150
9151         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9152
9153         return tg3_reset_hw(tp, reset_phy);
9154 }
9155
9156 #define TG3_STAT_ADD32(PSTAT, REG) \
9157 do {    u32 __val = tr32(REG); \
9158         (PSTAT)->low += __val; \
9159         if ((PSTAT)->low < __val) \
9160                 (PSTAT)->high += 1; \
9161 } while (0)
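/* TG3_STAT_ADD32 above folds a free-running 32-bit hardware counter into a
 * 64-bit software counter by detecting unsigned wraparound: after the add,
 * the low word is smaller than the value just added if and only if a carry
 * occurred.  A minimal standalone sketch of the same idiom (hypothetical
 * helper, not used anywhere in this driver):
 */
static inline void tg3_stat_add32_sketch(u32 *low, u32 *high, u32 val)
{
        *low += val;
        if (*low < val)         /* unsigned wrap => carry into the high word */
                *high += 1;
}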
9162
9163 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9164 {
9165         struct tg3_hw_stats *sp = tp->hw_stats;
9166
9167         if (!netif_carrier_ok(tp->dev))
9168                 return;
9169
9170         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9171         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9172         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9173         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9174         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9175         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9176         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9177         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9178         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9179         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9180         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9181         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9182         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9183
9184         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9185         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9186         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9187         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9188         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9189         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9190         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9191         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9192         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9193         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9194         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9195         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9196         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9197         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9198
9199         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9200         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9201             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9202             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9203                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9204         } else {
9205                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9206                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9207                 if (val) {
9208                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9209                         sp->rx_discards.low += val;
9210                         if (sp->rx_discards.low < val)
9211                                 sp->rx_discards.high += 1;
9212                 }
9213                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9214         }
9215         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9216 }
9217
9218 static void tg3_chk_missed_msi(struct tg3 *tp)
9219 {
9220         u32 i;
9221
9222         for (i = 0; i < tp->irq_cnt; i++) {
9223                 struct tg3_napi *tnapi = &tp->napi[i];
9224
9225                 if (tg3_has_work(tnapi)) {
9226                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9227                             tnapi->last_tx_cons == tnapi->tx_cons) {
9228                                 if (tnapi->chk_msi_cnt < 1) {
9229                                         tnapi->chk_msi_cnt++;
9230                                         return;
9231                                 }
9232                                 tg3_msi(0, tnapi);
9233                         }
9234                 }
9235                 tnapi->chk_msi_cnt = 0;
9236                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9237                 tnapi->last_tx_cons = tnapi->tx_cons;
9238         }
9239 }
9240
9241 static void tg3_timer(unsigned long __opaque)
9242 {
9243         struct tg3 *tp = (struct tg3 *) __opaque;
9244
9245         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9246                 goto restart_timer;
9247
9248         spin_lock(&tp->lock);
9249
9250         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9251             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
9252                 tg3_chk_missed_msi(tp);
9253
9254         if (!tg3_flag(tp, TAGGED_STATUS)) {
9255                 /* All of this garbage is because, when using non-tagged
9256                  * IRQ status, the mailbox/status_block protocol the chip
9257                  * uses with the CPU is race prone.
9258                  */
9259                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9260                         tw32(GRC_LOCAL_CTRL,
9261                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9262                 } else {
9263                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9264                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9265                 }
9266
9267                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9268                         spin_unlock(&tp->lock);
9269                         tg3_reset_task_schedule(tp);
9270                         goto restart_timer;
9271                 }
9272         }
9273
9274         /* This part only runs once per second. */
9275         if (!--tp->timer_counter) {
9276                 if (tg3_flag(tp, 5705_PLUS))
9277                         tg3_periodic_fetch_stats(tp);
9278
9279                 if (tp->setlpicnt && !--tp->setlpicnt)
9280                         tg3_phy_eee_enable(tp);
9281
9282                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9283                         u32 mac_stat;
9284                         int phy_event;
9285
9286                         mac_stat = tr32(MAC_STATUS);
9287
9288                         phy_event = 0;
9289                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9290                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9291                                         phy_event = 1;
9292                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9293                                 phy_event = 1;
9294
9295                         if (phy_event)
9296                                 tg3_setup_phy(tp, 0);
9297                 } else if (tg3_flag(tp, POLL_SERDES)) {
9298                         u32 mac_stat = tr32(MAC_STATUS);
9299                         int need_setup = 0;
9300
9301                         if (netif_carrier_ok(tp->dev) &&
9302                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9303                                 need_setup = 1;
9304                         }
9305                         if (!netif_carrier_ok(tp->dev) &&
9306                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9307                                          MAC_STATUS_SIGNAL_DET))) {
9308                                 need_setup = 1;
9309                         }
9310                         if (need_setup) {
9311                                 if (!tp->serdes_counter) {
9312                                         tw32_f(MAC_MODE,
9313                                              (tp->mac_mode &
9314                                               ~MAC_MODE_PORT_MODE_MASK));
9315                                         udelay(40);
9316                                         tw32_f(MAC_MODE, tp->mac_mode);
9317                                         udelay(40);
9318                                 }
9319                                 tg3_setup_phy(tp, 0);
9320                         }
9321                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9322                            tg3_flag(tp, 5780_CLASS)) {
9323                         tg3_serdes_parallel_detect(tp);
9324                 }
9325
9326                 tp->timer_counter = tp->timer_multiplier;
9327         }
9328
9329         /* Heartbeat is only sent once every 2 seconds.
9330          *
9331          * The heartbeat is to tell the ASF firmware that the host
9332          * driver is still alive.  In the event that the OS crashes,
9333          * ASF needs to reset the hardware to free up the FIFO space
9334          * that may be filled with rx packets destined for the host.
9335          * If the FIFO is full, ASF will no longer function properly.
9336          *
9337          * Unintended resets have been reported on real-time kernels
9338          * where the timer doesn't run on time.  Netpoll will also have
9339          * the same problem.
9340          *
9341          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9342          * to check the ring condition when the heartbeat is expiring
9343          * before doing the reset.  This will prevent most unintended
9344          * resets.
9345          */
9346         if (!--tp->asf_counter) {
9347                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9348                         tg3_wait_for_event_ack(tp);
9349
9350                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9351                                       FWCMD_NICDRV_ALIVE3);
9352                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9353                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9354                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9355
9356                         tg3_generate_fw_event(tp);
9357                 }
9358                 tp->asf_counter = tp->asf_multiplier;
9359         }
9360
9361         spin_unlock(&tp->lock);
9362
9363 restart_timer:
9364         tp->timer.expires = jiffies + tp->timer_offset;
9365         add_timer(&tp->timer);
9366 }
9367
9368 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9369 {
9370         irq_handler_t fn;
9371         unsigned long flags;
9372         char *name;
9373         struct tg3_napi *tnapi = &tp->napi[irq_num];
9374
9375         if (tp->irq_cnt == 1)
9376                 name = tp->dev->name;
9377         else {
9378                 name = &tnapi->irq_lbl[0];
9379                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9380                 name[IFNAMSIZ-1] = 0;
9381         }
9382
9383         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9384                 fn = tg3_msi;
9385                 if (tg3_flag(tp, 1SHOT_MSI))
9386                         fn = tg3_msi_1shot;
9387                 flags = 0;
9388         } else {
9389                 fn = tg3_interrupt;
9390                 if (tg3_flag(tp, TAGGED_STATUS))
9391                         fn = tg3_interrupt_tagged;
9392                 flags = IRQF_SHARED;
9393         }
9394
9395         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9396 }
9397
9398 static int tg3_test_interrupt(struct tg3 *tp)
9399 {
9400         struct tg3_napi *tnapi = &tp->napi[0];
9401         struct net_device *dev = tp->dev;
9402         int err, i, intr_ok = 0;
9403         u32 val;
9404
9405         if (!netif_running(dev))
9406                 return -ENODEV;
9407
9408         tg3_disable_ints(tp);
9409
9410         free_irq(tnapi->irq_vec, tnapi);
9411
9412         /*
9413          * Turn off MSI one-shot mode.  Otherwise this test has no way
9414          * to observe whether the interrupt was delivered.
9415          */
9416         if (tg3_flag(tp, 57765_PLUS)) {
9417                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9418                 tw32(MSGINT_MODE, val);
9419         }
9420
9421         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9422                           IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
9423         if (err)
9424                 return err;
9425
9426         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9427         tg3_enable_ints(tp);
9428
9429         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9430                tnapi->coal_now);
9431
9432         for (i = 0; i < 5; i++) {
9433                 u32 int_mbox, misc_host_ctrl;
9434
9435                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9436                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9437
9438                 if ((int_mbox != 0) ||
9439                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9440                         intr_ok = 1;
9441                         break;
9442                 }
9443
9444                 if (tg3_flag(tp, 57765_PLUS) &&
9445                     tnapi->hw_status->status_tag != tnapi->last_tag)
9446                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9447
9448                 msleep(10);
9449         }
9450
9451         tg3_disable_ints(tp);
9452
9453         free_irq(tnapi->irq_vec, tnapi);
9454
9455         err = tg3_request_irq(tp, 0);
9456
9457         if (err)
9458                 return err;
9459
9460         if (intr_ok) {
9461                 /* Reenable MSI one shot mode. */
9462                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9463                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9464                         tw32(MSGINT_MODE, val);
9465                 }
9466                 return 0;
9467         }
9468
9469         return -EIO;
9470 }
9471
9472 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode is
9473  * successfully restored.
9474  */
9475 static int tg3_test_msi(struct tg3 *tp)
9476 {
9477         int err;
9478         u16 pci_cmd;
9479
9480         if (!tg3_flag(tp, USING_MSI))
9481                 return 0;
9482
9483         /* Turn off SERR reporting in case MSI terminates with Master
9484          * Abort.
9485          */
9486         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9487         pci_write_config_word(tp->pdev, PCI_COMMAND,
9488                               pci_cmd & ~PCI_COMMAND_SERR);
9489
9490         err = tg3_test_interrupt(tp);
9491
9492         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9493
9494         if (!err)
9495                 return 0;
9496
9497         /* other failures */
9498         if (err != -EIO)
9499                 return err;
9500
9501         /* MSI test failed, go back to INTx mode */
9502         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9503                     "to INTx mode. Please report this failure to the PCI "
9504                     "maintainer and include system chipset information\n");
9505
9506         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9507
9508         pci_disable_msi(tp->pdev);
9509
9510         tg3_flag_clear(tp, USING_MSI);
9511         tp->napi[0].irq_vec = tp->pdev->irq;
9512
9513         err = tg3_request_irq(tp, 0);
9514         if (err)
9515                 return err;
9516
9517         /* Need to reset the chip because the MSI cycle may have terminated
9518          * with Master Abort.
9519          */
9520         tg3_full_lock(tp, 1);
9521
9522         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9523         err = tg3_init_hw(tp, 1);
9524
9525         tg3_full_unlock(tp);
9526
9527         if (err)
9528                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9529
9530         return err;
9531 }
9532
9533 static int tg3_request_firmware(struct tg3 *tp)
9534 {
9535         const __be32 *fw_data;
9536
9537         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9538                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9539                            tp->fw_needed);
9540                 return -ENOENT;
9541         }
9542
9543         fw_data = (void *)tp->fw->data;
9544
9545         /* Firmware blob starts with version numbers, followed by
9546          * start address and _full_ length including BSS sections
9547          * (which must be longer than the actual data, of course).
9548          */
9549
9550         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9551         if (tp->fw_len < (tp->fw->size - 12)) {
9552                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9553                            tp->fw_len, tp->fw_needed);
9554                 release_firmware(tp->fw);
9555                 tp->fw = NULL;
9556                 return -EINVAL;
9557         }
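        /* So the header is three big-endian u32s (12 bytes): per the comment
         * above, a version word, the start address, and the full length.
         * The check rejects blobs whose declared length (BSS included) is
         * smaller than the payload that actually follows the header,
         * tp->fw->size - 12.
         */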
9558
9559         /* We no longer need firmware; we have it. */
9560         tp->fw_needed = NULL;
9561         return 0;
9562 }
9563
9564 static bool tg3_enable_msix(struct tg3 *tp)
9565 {
9566         int i, rc, cpus = num_online_cpus();
9567         struct msix_entry msix_ent[tp->irq_max];
9568
9569         if (cpus == 1)
9570                 /* Just fall back to the simpler MSI mode. */
9571                 return false;
9572
9573         /*
9574          * We want as many rx rings enabled as there are cpus.
9575          * The first MSIX vector only deals with link interrupts, etc,
9576          * so we add one to the number of vectors we are requesting.
9577          */
9578         tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
9579
9580         for (i = 0; i < tp->irq_max; i++) {
9581                 msix_ent[i].entry  = i;
9582                 msix_ent[i].vector = 0;
9583         }
9584
9585         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9586         if (rc < 0) {
9587                 return false;
9588         } else if (rc != 0) {
9589                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9590                         return false;
9591                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9592                               tp->irq_cnt, rc);
9593                 tp->irq_cnt = rc;
9594         }
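        /* Note on the retry above: the legacy pci_enable_msix() returns 0 on
         * success, a negative errno on hard failure, or a positive count of
         * vectors that could actually be allocated; the second call retries
         * with that reduced count before giving up.
         */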
9595
9596         for (i = 0; i < tp->irq_max; i++)
9597                 tp->napi[i].irq_vec = msix_ent[i].vector;
9598
9599         netif_set_real_num_tx_queues(tp->dev, 1);
9600         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9601         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9602                 pci_disable_msix(tp->pdev);
9603                 return false;
9604         }
9605
9606         if (tp->irq_cnt > 1) {
9607                 tg3_flag_set(tp, ENABLE_RSS);
9608
9609                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9610                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9611                         tg3_flag_set(tp, ENABLE_TSS);
9612                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9613                 }
9614         }
9615
9616         return true;
9617 }
9618
9619 static void tg3_ints_init(struct tg3 *tp)
9620 {
9621         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9622             !tg3_flag(tp, TAGGED_STATUS)) {
9623                 /* All MSI-supporting chips should support tagged
9624                  * status; warn and fall back if this is not the case.
9625                  */
9626                 netdev_warn(tp->dev,
9627                             "MSI without TAGGED_STATUS? Not using MSI\n");
9628                 goto defcfg;
9629         }
9630
9631         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9632                 tg3_flag_set(tp, USING_MSIX);
9633         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9634                 tg3_flag_set(tp, USING_MSI);
9635
9636         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9637                 u32 msi_mode = tr32(MSGINT_MODE);
9638                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9639                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9640                 if (!tg3_flag(tp, 1SHOT_MSI))
9641                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9642                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9643         }
9644 defcfg:
9645         if (!tg3_flag(tp, USING_MSIX)) {
9646                 tp->irq_cnt = 1;
9647                 tp->napi[0].irq_vec = tp->pdev->irq;
9648                 netif_set_real_num_tx_queues(tp->dev, 1);
9649                 netif_set_real_num_rx_queues(tp->dev, 1);
9650         }
9651 }
9652
9653 static void tg3_ints_fini(struct tg3 *tp)
9654 {
9655         if (tg3_flag(tp, USING_MSIX))
9656                 pci_disable_msix(tp->pdev);
9657         else if (tg3_flag(tp, USING_MSI))
9658                 pci_disable_msi(tp->pdev);
9659         tg3_flag_clear(tp, USING_MSI);
9660         tg3_flag_clear(tp, USING_MSIX);
9661         tg3_flag_clear(tp, ENABLE_RSS);
9662         tg3_flag_clear(tp, ENABLE_TSS);
9663 }
9664
9665 static int tg3_open(struct net_device *dev)
9666 {
9667         struct tg3 *tp = netdev_priv(dev);
9668         int i, err;
9669
9670         if (tp->fw_needed) {
9671                 err = tg3_request_firmware(tp);
9672                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9673                         if (err)
9674                                 return err;
9675                 } else if (err) {
9676                         netdev_warn(tp->dev, "TSO capability disabled\n");
9677                         tg3_flag_clear(tp, TSO_CAPABLE);
9678                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9679                         netdev_notice(tp->dev, "TSO capability restored\n");
9680                         tg3_flag_set(tp, TSO_CAPABLE);
9681                 }
9682         }
9683
9684         netif_carrier_off(tp->dev);
9685
9686         err = tg3_power_up(tp);
9687         if (err)
9688                 return err;
9689
9690         tg3_full_lock(tp, 0);
9691
9692         tg3_disable_ints(tp);
9693         tg3_flag_clear(tp, INIT_COMPLETE);
9694
9695         tg3_full_unlock(tp);
9696
9697         /*
9698          * Set up interrupts first so we know how
9699          * many NAPI resources to allocate.
9700          */
9701         tg3_ints_init(tp);
9702
9703         /* The placement of this call is tied
9704          * to the setup and use of Host TX descriptors.
9705          */
9706         err = tg3_alloc_consistent(tp);
9707         if (err)
9708                 goto err_out1;
9709
9710         tg3_napi_init(tp);
9711
9712         tg3_napi_enable(tp);
9713
9714         for (i = 0; i < tp->irq_cnt; i++) {
9715                 struct tg3_napi *tnapi = &tp->napi[i];
9716                 err = tg3_request_irq(tp, i);
9717                 if (err) {
9718                         for (i--; i >= 0; i--) {
9719                                 tnapi = &tp->napi[i];
9720                                 free_irq(tnapi->irq_vec, tnapi);
9721                         }
9722                         goto err_out2;
9723                 }
9724         }
9725
9726         tg3_full_lock(tp, 0);
9727
9728         err = tg3_init_hw(tp, 1);
9729         if (err) {
9730                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9731                 tg3_free_rings(tp);
9732         } else {
9733                 if (tg3_flag(tp, TAGGED_STATUS) &&
9734                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9735                         GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
9736                         tp->timer_offset = HZ;
9737                 else
9738                         tp->timer_offset = HZ / 10;
9739
9740                 BUG_ON(tp->timer_offset > HZ);
9741                 tp->timer_counter = tp->timer_multiplier =
9742                         (HZ / tp->timer_offset);
9743                 tp->asf_counter = tp->asf_multiplier =
9744                         ((HZ / tp->timer_offset) * 2);
9745
9746                 init_timer(&tp->timer);
9747                 tp->timer.expires = jiffies + tp->timer_offset;
9748                 tp->timer.data = (unsigned long) tp;
9749                 tp->timer.function = tg3_timer;
9750         }
9751
9752         tg3_full_unlock(tp);
9753
9754         if (err)
9755                 goto err_out3;
9756
9757         if (tg3_flag(tp, USING_MSI)) {
9758                 err = tg3_test_msi(tp);
9759
9760                 if (err) {
9761                         tg3_full_lock(tp, 0);
9762                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9763                         tg3_free_rings(tp);
9764                         tg3_full_unlock(tp);
9765
9766                         goto err_out2;
9767                 }
9768
9769                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9770                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9771
9772                         tw32(PCIE_TRANSACTION_CFG,
9773                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9774                 }
9775         }
9776
9777         tg3_phy_start(tp);
9778
9779         tg3_full_lock(tp, 0);
9780
9781         add_timer(&tp->timer);
9782         tg3_flag_set(tp, INIT_COMPLETE);
9783         tg3_enable_ints(tp);
9784
9785         tg3_full_unlock(tp);
9786
9787         netif_tx_start_all_queues(dev);
9788
9789         /*
9790          * Reset the loopback feature if it was turned on while the device
9791          * was down; make sure that it is reinstated properly now.
9792          */
9793         if (dev->features & NETIF_F_LOOPBACK)
9794                 tg3_set_loopback(dev, dev->features);
9795
9796         return 0;
9797
9798 err_out3:
9799         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9800                 struct tg3_napi *tnapi = &tp->napi[i];
9801                 free_irq(tnapi->irq_vec, tnapi);
9802         }
9803
9804 err_out2:
9805         tg3_napi_disable(tp);
9806         tg3_napi_fini(tp);
9807         tg3_free_consistent(tp);
9808
9809 err_out1:
9810         tg3_ints_fini(tp);
9811         tg3_frob_aux_power(tp, false);
9812         pci_set_power_state(tp->pdev, PCI_D3hot);
9813         return err;
9814 }
9815
9816 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9817                                                  struct rtnl_link_stats64 *);
9818 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9819
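/* Tear down in roughly the reverse order of tg3_open(), snapshotting the
 * hardware statistics so they remain available while the interface is
 * down.
 */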
9820 static int tg3_close(struct net_device *dev)
9821 {
9822         int i;
9823         struct tg3 *tp = netdev_priv(dev);
9824
9825         tg3_napi_disable(tp);
9826         tg3_reset_task_cancel(tp);
9827
9828         netif_tx_stop_all_queues(dev);
9829
9830         del_timer_sync(&tp->timer);
9831
9832         tg3_phy_stop(tp);
9833
9834         tg3_full_lock(tp, 1);
9835
9836         tg3_disable_ints(tp);
9837
9838         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9839         tg3_free_rings(tp);
9840         tg3_flag_clear(tp, INIT_COMPLETE);
9841
9842         tg3_full_unlock(tp);
9843
9844         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9845                 struct tg3_napi *tnapi = &tp->napi[i];
9846                 free_irq(tnapi->irq_vec, tnapi);
9847         }
9848
9849         tg3_ints_fini(tp);
9850
9851         tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9852
9853         memcpy(&tp->estats_prev, tg3_get_estats(tp),
9854                sizeof(tp->estats_prev));
9855
9856         tg3_napi_fini(tp);
9857
9858         tg3_free_consistent(tp);
9859
9860         tg3_power_down(tp);
9861
9862         netif_carrier_off(tp->dev);
9863
9864         return 0;
9865 }
9866
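/* Hardware counters are kept as {high, low} 32-bit halves; merge them
 * into one 64-bit value.
 */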
9867 static inline u64 get_stat64(tg3_stat64_t *val)
9868 {
9869         return ((u64)val->high << 32) | ((u64)val->low);
9870 }
9871
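/* On 5700/5701 with a copper PHY, CRC errors come from the PHY: the
 * driver enables CRC counting via MII_TG3_TEST1 and reads the
 * MII_TG3_RXR_COUNTERS register, accumulating the (apparently
 * clear-on-read) count in tp->phy_crc_errors.  All other configurations
 * report the MAC's rx_fcs_errors counter directly.
 */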
9872 static u64 calc_crc_errors(struct tg3 *tp)
9873 {
9874         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9875
9876         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9877             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9878              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9879                 u32 val;
9880
9881                 spin_lock_bh(&tp->lock);
9882                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9883                         tg3_writephy(tp, MII_TG3_TEST1,
9884                                      val | MII_TG3_TEST1_CRC_EN);
9885                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
9886                 } else
9887                         val = 0;
9888                 spin_unlock_bh(&tp->lock);
9889
9890                 tp->phy_crc_errors += val;
9891
9892                 return tp->phy_crc_errors;
9893         }
9894
9895         return get_stat64(&hw_stats->rx_fcs_errors);
9896 }
9897
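/* Fold one hardware counter into the cumulative ethtool statistic.
 * The counters are lost across chip resets, so the snapshot taken in
 * tg3_close() (tp->estats_prev) is added back to keep the reported
 * values monotonic across down/up cycles.
 */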
9898 #define ESTAT_ADD(member) \
9899         estats->member =        old_estats->member + \
9900                                 get_stat64(&hw_stats->member)
9901
9902 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9903 {
9904         struct tg3_ethtool_stats *estats = &tp->estats;
9905         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9906         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9907
9908         if (!hw_stats)
9909                 return old_estats;
9910
9911         ESTAT_ADD(rx_octets);
9912         ESTAT_ADD(rx_fragments);
9913         ESTAT_ADD(rx_ucast_packets);
9914         ESTAT_ADD(rx_mcast_packets);
9915         ESTAT_ADD(rx_bcast_packets);
9916         ESTAT_ADD(rx_fcs_errors);
9917         ESTAT_ADD(rx_align_errors);
9918         ESTAT_ADD(rx_xon_pause_rcvd);
9919         ESTAT_ADD(rx_xoff_pause_rcvd);
9920         ESTAT_ADD(rx_mac_ctrl_rcvd);
9921         ESTAT_ADD(rx_xoff_entered);
9922         ESTAT_ADD(rx_frame_too_long_errors);
9923         ESTAT_ADD(rx_jabbers);
9924         ESTAT_ADD(rx_undersize_packets);
9925         ESTAT_ADD(rx_in_length_errors);
9926         ESTAT_ADD(rx_out_length_errors);
9927         ESTAT_ADD(rx_64_or_less_octet_packets);
9928         ESTAT_ADD(rx_65_to_127_octet_packets);
9929         ESTAT_ADD(rx_128_to_255_octet_packets);
9930         ESTAT_ADD(rx_256_to_511_octet_packets);
9931         ESTAT_ADD(rx_512_to_1023_octet_packets);
9932         ESTAT_ADD(rx_1024_to_1522_octet_packets);
9933         ESTAT_ADD(rx_1523_to_2047_octet_packets);
9934         ESTAT_ADD(rx_2048_to_4095_octet_packets);
9935         ESTAT_ADD(rx_4096_to_8191_octet_packets);
9936         ESTAT_ADD(rx_8192_to_9022_octet_packets);
9937
9938         ESTAT_ADD(tx_octets);
9939         ESTAT_ADD(tx_collisions);
9940         ESTAT_ADD(tx_xon_sent);
9941         ESTAT_ADD(tx_xoff_sent);
9942         ESTAT_ADD(tx_flow_control);
9943         ESTAT_ADD(tx_mac_errors);
9944         ESTAT_ADD(tx_single_collisions);
9945         ESTAT_ADD(tx_mult_collisions);
9946         ESTAT_ADD(tx_deferred);
9947         ESTAT_ADD(tx_excessive_collisions);
9948         ESTAT_ADD(tx_late_collisions);
9949         ESTAT_ADD(tx_collide_2times);
9950         ESTAT_ADD(tx_collide_3times);
9951         ESTAT_ADD(tx_collide_4times);
9952         ESTAT_ADD(tx_collide_5times);
9953         ESTAT_ADD(tx_collide_6times);
9954         ESTAT_ADD(tx_collide_7times);
9955         ESTAT_ADD(tx_collide_8times);
9956         ESTAT_ADD(tx_collide_9times);
9957         ESTAT_ADD(tx_collide_10times);
9958         ESTAT_ADD(tx_collide_11times);
9959         ESTAT_ADD(tx_collide_12times);
9960         ESTAT_ADD(tx_collide_13times);
9961         ESTAT_ADD(tx_collide_14times);
9962         ESTAT_ADD(tx_collide_15times);
9963         ESTAT_ADD(tx_ucast_packets);
9964         ESTAT_ADD(tx_mcast_packets);
9965         ESTAT_ADD(tx_bcast_packets);
9966         ESTAT_ADD(tx_carrier_sense_errors);
9967         ESTAT_ADD(tx_discards);
9968         ESTAT_ADD(tx_errors);
9969
9970         ESTAT_ADD(dma_writeq_full);
9971         ESTAT_ADD(dma_write_prioq_full);
9972         ESTAT_ADD(rxbds_empty);
9973         ESTAT_ADD(rx_discards);
9974         ESTAT_ADD(rx_errors);
9975         ESTAT_ADD(rx_threshold_hit);
9976
9977         ESTAT_ADD(dma_readq_full);
9978         ESTAT_ADD(dma_read_prioq_full);
9979         ESTAT_ADD(tx_comp_queue_full);
9980
9981         ESTAT_ADD(ring_set_send_prod_index);
9982         ESTAT_ADD(ring_status_update);
9983         ESTAT_ADD(nic_irqs);
9984         ESTAT_ADD(nic_avoided_irqs);
9985         ESTAT_ADD(nic_tx_threshold_hit);
9986
9987         ESTAT_ADD(mbuf_lwm_thresh_hit);
9988
9989         return estats;
9990 }
9991
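/* ndo_get_stats64 handler: map the hardware counter block onto the
 * standard interface statistics, again adding the snapshot saved when
 * the device was last closed.
 */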
9992 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9993                                                  struct rtnl_link_stats64 *stats)
9994 {
9995         struct tg3 *tp = netdev_priv(dev);
9996         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9997         struct tg3_hw_stats *hw_stats = tp->hw_stats;
9998
9999         if (!hw_stats)
10000                 return old_stats;
10001
10002         stats->rx_packets = old_stats->rx_packets +
10003                 get_stat64(&hw_stats->rx_ucast_packets) +
10004                 get_stat64(&hw_stats->rx_mcast_packets) +
10005                 get_stat64(&hw_stats->rx_bcast_packets);
10006
10007         stats->tx_packets = old_stats->tx_packets +
10008                 get_stat64(&hw_stats->tx_ucast_packets) +
10009                 get_stat64(&hw_stats->tx_mcast_packets) +
10010                 get_stat64(&hw_stats->tx_bcast_packets);
10011
10012         stats->rx_bytes = old_stats->rx_bytes +
10013                 get_stat64(&hw_stats->rx_octets);
10014         stats->tx_bytes = old_stats->tx_bytes +
10015                 get_stat64(&hw_stats->tx_octets);
10016
10017         stats->rx_errors = old_stats->rx_errors +
10018                 get_stat64(&hw_stats->rx_errors);
10019         stats->tx_errors = old_stats->tx_errors +
10020                 get_stat64(&hw_stats->tx_errors) +
10021                 get_stat64(&hw_stats->tx_mac_errors) +
10022                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10023                 get_stat64(&hw_stats->tx_discards);
10024
10025         stats->multicast = old_stats->multicast +
10026                 get_stat64(&hw_stats->rx_mcast_packets);
10027         stats->collisions = old_stats->collisions +
10028                 get_stat64(&hw_stats->tx_collisions);
10029
10030         stats->rx_length_errors = old_stats->rx_length_errors +
10031                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10032                 get_stat64(&hw_stats->rx_undersize_packets);
10033
10034         stats->rx_over_errors = old_stats->rx_over_errors +
10035                 get_stat64(&hw_stats->rxbds_empty);
10036         stats->rx_frame_errors = old_stats->rx_frame_errors +
10037                 get_stat64(&hw_stats->rx_align_errors);
10038         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10039                 get_stat64(&hw_stats->tx_discards);
10040         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10041                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10042
10043         stats->rx_crc_errors = old_stats->rx_crc_errors +
10044                 calc_crc_errors(tp);
10045
10046         stats->rx_missed_errors = old_stats->rx_missed_errors +
10047                 get_stat64(&hw_stats->rx_discards);
10048
10049         stats->rx_dropped = tp->rx_dropped;
10050         stats->tx_dropped = tp->tx_dropped;
10051
10052         return stats;
10053 }
10054
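/* Bitwise reflected CRC-32 over len bytes: polynomial 0xEDB88320,
 * initial value 0xFFFFFFFF, final one's complement (the same CRC used
 * for the Ethernet FCS).  It should be equivalent to
 *
 *        crc32_le(~0, buf, len) ^ ~0
 *
 * from <linux/crc32.h>; it feeds the multicast hash filter below.
 */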
10055 static inline u32 calc_crc(unsigned char *buf, int len)
10056 {
10057         u32 reg;
10058         u32 tmp;
10059         int j, k;
10060
10061         reg = 0xffffffff;
10062
10063         for (j = 0; j < len; j++) {
10064                 reg ^= buf[j];
10065
10066                 for (k = 0; k < 8; k++) {
10067                         tmp = reg & 0x01;
10068
10069                         reg >>= 1;
10070
10071                         if (tmp)
10072                                 reg ^= 0xedb88320;
10073                 }
10074         }
10075
10076         return ~reg;
10077 }
10078
10079 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
10080 {
10081         /* accept or reject all multicast frames */
10082         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
10083         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
10084         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
10085         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
10086 }
10087
10088 static void __tg3_set_rx_mode(struct net_device *dev)
10089 {
10090         struct tg3 *tp = netdev_priv(dev);
10091         u32 rx_mode;
10092
10093         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
10094                                   RX_MODE_KEEP_VLAN_TAG);
10095
10096 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
10097         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
10098          * flag clear.
10099          */
10100         if (!tg3_flag(tp, ENABLE_ASF))
10101                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
10102 #endif
10103
10104         if (dev->flags & IFF_PROMISC) {
10105                 /* Promiscuous mode. */
10106                 rx_mode |= RX_MODE_PROMISC;
10107         } else if (dev->flags & IFF_ALLMULTI) {
10108                 /* Accept all multicast. */
10109                 tg3_set_multi(tp, 1);
10110         } else if (netdev_mc_empty(dev)) {
10111                 /* Reject all multicast. */
10112                 tg3_set_multi(tp, 0);
10113         } else {
10114                 /* Accept one or more multicast(s). */
10115                 struct netdev_hw_addr *ha;
10116                 u32 mc_filter[4] = { 0, };
10117                 u32 regidx;
10118                 u32 bit;
10119                 u32 crc;
10120
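                /* Hash each address with the Ethernet CRC-32 and use the
                 * low 7 bits of the pre-complement remainder (hence the
                 * ~crc) as the filter bit index: bits 6:5 select one of
                 * the four hash registers, bits 4:0 the bit within it.
                 */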
10121                 netdev_for_each_mc_addr(ha, dev) {
10122                         crc = calc_crc(ha->addr, ETH_ALEN);
10123                         bit = ~crc & 0x7f;
10124                         regidx = (bit & 0x60) >> 5;
10125                         bit &= 0x1f;
10126                         mc_filter[regidx] |= (1 << bit);
10127                 }
10128
10129                 tw32(MAC_HASH_REG_0, mc_filter[0]);
10130                 tw32(MAC_HASH_REG_1, mc_filter[1]);
10131                 tw32(MAC_HASH_REG_2, mc_filter[2]);
10132                 tw32(MAC_HASH_REG_3, mc_filter[3]);
10133         }
10134
10135         if (rx_mode != tp->rx_mode) {
10136                 tp->rx_mode = rx_mode;
10137                 tw32_f(MAC_RX_MODE, rx_mode);
10138                 udelay(10);
10139         }
10140 }
10141
10142 static void tg3_set_rx_mode(struct net_device *dev)
10143 {
10144         struct tg3 *tp = netdev_priv(dev);
10145
10146         if (!netif_running(dev))
10147                 return;
10148
10149         tg3_full_lock(tp, 0);
10150         __tg3_set_rx_mode(dev);
10151         tg3_full_unlock(tp);
10152 }
10153
10154 static int tg3_get_regs_len(struct net_device *dev)
10155 {
10156         return TG3_REG_BLK_SIZE;
10157 }
10158
10159 static void tg3_get_regs(struct net_device *dev,
10160                 struct ethtool_regs *regs, void *_p)
10161 {
10162         struct tg3 *tp = netdev_priv(dev);
10163
10164         regs->version = 0;
10165
10166         memset(_p, 0, TG3_REG_BLK_SIZE);
10167
10168         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10169                 return;
10170
10171         tg3_full_lock(tp, 0);
10172
10173         tg3_dump_legacy_regs(tp, (u32 *)_p);
10174
10175         tg3_full_unlock(tp);
10176 }
10177
10178 static int tg3_get_eeprom_len(struct net_device *dev)
10179 {
10180         struct tg3 *tp = netdev_priv(dev);
10181
10182         return tp->nvram_size;
10183 }
10184
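/* ethtool EEPROM read.  NVRAM is read in 4-byte words, so the request is
 * split into an unaligned head, an aligned middle, and an unaligned tail;
 * eeprom->len tracks how many bytes were copied out, even on a partial
 * failure.
 */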
10185 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10186 {
10187         struct tg3 *tp = netdev_priv(dev);
10188         int ret;
10189         u8  *pd;
10190         u32 i, offset, len, b_offset, b_count;
10191         __be32 val;
10192
10193         if (tg3_flag(tp, NO_NVRAM))
10194                 return -EINVAL;
10195
10196         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10197                 return -EAGAIN;
10198
10199         offset = eeprom->offset;
10200         len = eeprom->len;
10201         eeprom->len = 0;
10202
10203         eeprom->magic = TG3_EEPROM_MAGIC;
10204
10205         if (offset & 3) {
10206                 /* adjust to start on the required 4-byte boundary */
10207                 b_offset = offset & 3;
10208                 b_count = 4 - b_offset;
10209                 if (b_count > len) {
10210                         /* i.e. offset=1 len=2 */
10211                         b_count = len;
10212                 }
10213                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10214                 if (ret)
10215                         return ret;
10216                 memcpy(data, ((char *)&val) + b_offset, b_count);
10217                 len -= b_count;
10218                 offset += b_count;
10219                 eeprom->len += b_count;
10220         }
10221
10222         /* read bytes up to the last 4-byte boundary */
10223         pd = &data[eeprom->len];
10224         for (i = 0; i < (len - (len & 3)); i += 4) {
10225                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10226                 if (ret) {
10227                         eeprom->len += i;
10228                         return ret;
10229                 }
10230                 memcpy(pd + i, &val, 4);
10231         }
10232         eeprom->len += i;
10233
10234         if (len & 3) {
10235                 /* read the last bytes not ending on a 4-byte boundary */
10236                 pd = &data[eeprom->len];
10237                 b_count = len & 3;
10238                 b_offset = offset + len - b_count;
10239                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10240                 if (ret)
10241                         return ret;
10242                 memcpy(pd, &val, b_count);
10243                 eeprom->len += b_count;
10244         }
10245         return 0;
10246 }
10247
10248 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
10249
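/* ethtool EEPROM write.  Unaligned requests are widened to 4-byte
 * boundaries via read-modify-write: the bordering NVRAM words are read
 * first and merged with the caller's data in a temporary buffer.
 */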
10250 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10251 {
10252         struct tg3 *tp = netdev_priv(dev);
10253         int ret;
10254         u32 offset, len, b_offset, odd_len;
10255         u8 *buf;
10256         __be32 start, end;
10257
10258         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10259                 return -EAGAIN;
10260
10261         if (tg3_flag(tp, NO_NVRAM) ||
10262             eeprom->magic != TG3_EEPROM_MAGIC)
10263                 return -EINVAL;
10264
10265         offset = eeprom->offset;
10266         len = eeprom->len;
10267
10268         if ((b_offset = (offset & 3))) {
10269                 /* adjust to start on the required 4-byte boundary */
10270                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10271                 if (ret)
10272                         return ret;
10273                 len += b_offset;
10274                 offset &= ~3;
10275                 if (len < 4)
10276                         len = 4;
10277         }
10278
10279         odd_len = 0;
10280         if (len & 3) {
10281                 /* adjust to end on the required 4-byte boundary */
10282                 odd_len = 1;
10283                 len = (len + 3) & ~3;
10284                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10285                 if (ret)
10286                         return ret;
10287         }
10288
10289         buf = data;
10290         if (b_offset || odd_len) {
10291                 buf = kmalloc(len, GFP_KERNEL);
10292                 if (!buf)
10293                         return -ENOMEM;
10294                 if (b_offset)
10295                         memcpy(buf, &start, 4);
10296                 if (odd_len)
10297                         memcpy(buf+len-4, &end, 4);
10298                 memcpy(buf + b_offset, data, eeprom->len);
10299         }
10300
10301         ret = tg3_nvram_write_block(tp, offset, len, buf);
10302
10303         if (buf != data)
10304                 kfree(buf);
10305
10306         return ret;
10307 }
10308
10309 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10310 {
10311         struct tg3 *tp = netdev_priv(dev);
10312
10313         if (tg3_flag(tp, USE_PHYLIB)) {
10314                 struct phy_device *phydev;
10315                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10316                         return -EAGAIN;
10317                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10318                 return phy_ethtool_gset(phydev, cmd);
10319         }
10320
10321         cmd->supported = (SUPPORTED_Autoneg);
10322
10323         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10324                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10325                                    SUPPORTED_1000baseT_Full);
10326
10327         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10328                 cmd->supported |= (SUPPORTED_100baseT_Half |
10329                                   SUPPORTED_100baseT_Full |
10330                                   SUPPORTED_10baseT_Half |
10331                                   SUPPORTED_10baseT_Full |
10332                                   SUPPORTED_TP);
10333                 cmd->port = PORT_TP;
10334         } else {
10335                 cmd->supported |= SUPPORTED_FIBRE;
10336                 cmd->port = PORT_FIBRE;
10337         }
10338
10339         cmd->advertising = tp->link_config.advertising;
10340         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10341                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10342                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10343                                 cmd->advertising |= ADVERTISED_Pause;
10344                         } else {
10345                                 cmd->advertising |= ADVERTISED_Pause |
10346                                                     ADVERTISED_Asym_Pause;
10347                         }
10348                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10349                         cmd->advertising |= ADVERTISED_Asym_Pause;
10350                 }
10351         }
10352         if (netif_running(dev)) {
10353                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10354                 cmd->duplex = tp->link_config.active_duplex;
10355         } else {
10356                 ethtool_cmd_speed_set(cmd, SPEED_INVALID);
10357                 cmd->duplex = DUPLEX_INVALID;
10358         }
10359         cmd->phy_address = tp->phy_addr;
10360         cmd->transceiver = XCVR_INTERNAL;
10361         cmd->autoneg = tp->link_config.autoneg;
10362         cmd->maxtxpkt = 0;
10363         cmd->maxrxpkt = 0;
10364         return 0;
10365 }
10366
10367 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10368 {
10369         struct tg3 *tp = netdev_priv(dev);
10370         u32 speed = ethtool_cmd_speed(cmd);
10371
10372         if (tg3_flag(tp, USE_PHYLIB)) {
10373                 struct phy_device *phydev;
10374                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10375                         return -EAGAIN;
10376                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10377                 return phy_ethtool_sset(phydev, cmd);
10378         }
10379
10380         if (cmd->autoneg != AUTONEG_ENABLE &&
10381             cmd->autoneg != AUTONEG_DISABLE)
10382                 return -EINVAL;
10383
10384         if (cmd->autoneg == AUTONEG_DISABLE &&
10385             cmd->duplex != DUPLEX_FULL &&
10386             cmd->duplex != DUPLEX_HALF)
10387                 return -EINVAL;
10388
10389         if (cmd->autoneg == AUTONEG_ENABLE) {
10390                 u32 mask = ADVERTISED_Autoneg |
10391                            ADVERTISED_Pause |
10392                            ADVERTISED_Asym_Pause;
10393
10394                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10395                         mask |= ADVERTISED_1000baseT_Half |
10396                                 ADVERTISED_1000baseT_Full;
10397
10398                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10399                         mask |= ADVERTISED_100baseT_Half |
10400                                 ADVERTISED_100baseT_Full |
10401                                 ADVERTISED_10baseT_Half |
10402                                 ADVERTISED_10baseT_Full |
10403                                 ADVERTISED_TP;
10404                 else
10405                         mask |= ADVERTISED_FIBRE;
10406
10407                 if (cmd->advertising & ~mask)
10408                         return -EINVAL;
10409
10410                 mask &= (ADVERTISED_1000baseT_Half |
10411                          ADVERTISED_1000baseT_Full |
10412                          ADVERTISED_100baseT_Half |
10413                          ADVERTISED_100baseT_Full |
10414                          ADVERTISED_10baseT_Half |
10415                          ADVERTISED_10baseT_Full);
10416
10417                 cmd->advertising &= mask;
10418         } else {
10419                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10420                         if (speed != SPEED_1000)
10421                                 return -EINVAL;
10422
10423                         if (cmd->duplex != DUPLEX_FULL)
10424                                 return -EINVAL;
10425                 } else {
10426                         if (speed != SPEED_100 &&
10427                             speed != SPEED_10)
10428                                 return -EINVAL;
10429                 }
10430         }
10431
10432         tg3_full_lock(tp, 0);
10433
10434         tp->link_config.autoneg = cmd->autoneg;
10435         if (cmd->autoneg == AUTONEG_ENABLE) {
10436                 tp->link_config.advertising = (cmd->advertising |
10437                                               ADVERTISED_Autoneg);
10438                 tp->link_config.speed = SPEED_INVALID;
10439                 tp->link_config.duplex = DUPLEX_INVALID;
10440         } else {
10441                 tp->link_config.advertising = 0;
10442                 tp->link_config.speed = speed;
10443                 tp->link_config.duplex = cmd->duplex;
10444         }
10445
10446         tp->link_config.orig_speed = tp->link_config.speed;
10447         tp->link_config.orig_duplex = tp->link_config.duplex;
10448         tp->link_config.orig_autoneg = tp->link_config.autoneg;
10449
10450         if (netif_running(dev))
10451                 tg3_setup_phy(tp, 1);
10452
10453         tg3_full_unlock(tp);
10454
10455         return 0;
10456 }
10457
10458 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10459 {
10460         struct tg3 *tp = netdev_priv(dev);
10461
10462         strcpy(info->driver, DRV_MODULE_NAME);
10463         strcpy(info->version, DRV_MODULE_VERSION);
10464         strcpy(info->fw_version, tp->fw_ver);
10465         strcpy(info->bus_info, pci_name(tp->pdev));
10466 }
10467
10468 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10469 {
10470         struct tg3 *tp = netdev_priv(dev);
10471
10472         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10473                 wol->supported = WAKE_MAGIC;
10474         else
10475                 wol->supported = 0;
10476         wol->wolopts = 0;
10477         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10478                 wol->wolopts = WAKE_MAGIC;
10479         memset(&wol->sopass, 0, sizeof(wol->sopass));
10480 }
10481
10482 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10483 {
10484         struct tg3 *tp = netdev_priv(dev);
10485         struct device *dp = &tp->pdev->dev;
10486
10487         if (wol->wolopts & ~WAKE_MAGIC)
10488                 return -EINVAL;
10489         if ((wol->wolopts & WAKE_MAGIC) &&
10490             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10491                 return -EINVAL;
10492
10493         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10494
10495         spin_lock_bh(&tp->lock);
10496         if (device_may_wakeup(dp))
10497                 tg3_flag_set(tp, WOL_ENABLE);
10498         else
10499                 tg3_flag_clear(tp, WOL_ENABLE);
10500         spin_unlock_bh(&tp->lock);
10501
10502         return 0;
10503 }
10504
10505 static u32 tg3_get_msglevel(struct net_device *dev)
10506 {
10507         struct tg3 *tp = netdev_priv(dev);
10508         return tp->msg_enable;
10509 }
10510
10511 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10512 {
10513         struct tg3 *tp = netdev_priv(dev);
10514         tp->msg_enable = value;
10515 }
10516
10517 static int tg3_nway_reset(struct net_device *dev)
10518 {
10519         struct tg3 *tp = netdev_priv(dev);
10520         int r;
10521
10522         if (!netif_running(dev))
10523                 return -EAGAIN;
10524
10525         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10526                 return -EINVAL;
10527
10528         if (tg3_flag(tp, USE_PHYLIB)) {
10529                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10530                         return -EAGAIN;
10531                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10532         } else {
10533                 u32 bmcr;
10534
10535                 spin_lock_bh(&tp->lock);
10536                 r = -EINVAL;
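                /* Read BMCR twice; the first result is discarded,
                 * apparently to flush a stale value latched in the PHY.
                 */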
10537                 tg3_readphy(tp, MII_BMCR, &bmcr);
10538                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10539                     ((bmcr & BMCR_ANENABLE) ||
10540                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10541                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10542                                                    BMCR_ANENABLE);
10543                         r = 0;
10544                 }
10545                 spin_unlock_bh(&tp->lock);
10546         }
10547
10548         return r;
10549 }
10550
10551 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10552 {
10553         struct tg3 *tp = netdev_priv(dev);
10554
10555         ering->rx_max_pending = tp->rx_std_ring_mask;
10556         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10557                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10558         else
10559                 ering->rx_jumbo_max_pending = 0;
10560
10561         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10562
10563         ering->rx_pending = tp->rx_pending;
10564         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10565                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10566         else
10567                 ering->rx_jumbo_pending = 0;
10568
10569         ering->tx_pending = tp->napi[0].tx_pending;
10570 }
10571
10572 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10573 {
10574         struct tg3 *tp = netdev_priv(dev);
10575         int i, irq_sync = 0, err = 0;
10576
10577         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10578             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10579             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10580             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10581             (tg3_flag(tp, TSO_BUG) &&
10582              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10583                 return -EINVAL;
10584
10585         if (netif_running(dev)) {
10586                 tg3_phy_stop(tp);
10587                 tg3_netif_stop(tp);
10588                 irq_sync = 1;
10589         }
10590
10591         tg3_full_lock(tp, irq_sync);
10592
10593         tp->rx_pending = ering->rx_pending;
10594
10595         if (tg3_flag(tp, MAX_RXPEND_64) &&
10596             tp->rx_pending > 63)
10597                 tp->rx_pending = 63;
10598         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10599
10600         for (i = 0; i < tp->irq_max; i++)
10601                 tp->napi[i].tx_pending = ering->tx_pending;
10602
10603         if (netif_running(dev)) {
10604                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10605                 err = tg3_restart_hw(tp, 1);
10606                 if (!err)
10607                         tg3_netif_start(tp);
10608         }
10609
10610         tg3_full_unlock(tp);
10611
10612         if (irq_sync && !err)
10613                 tg3_phy_start(tp);
10614
10615         return err;
10616 }
10617
10618 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10619 {
10620         struct tg3 *tp = netdev_priv(dev);
10621
10622         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10623
10624         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10625                 epause->rx_pause = 1;
10626         else
10627                 epause->rx_pause = 0;
10628
10629         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10630                 epause->tx_pause = 1;
10631         else
10632                 epause->tx_pause = 0;
10633 }
10634
10635 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10636 {
10637         struct tg3 *tp = netdev_priv(dev);
10638         int err = 0;
10639
10640         if (tg3_flag(tp, USE_PHYLIB)) {
10641                 u32 newadv;
10642                 struct phy_device *phydev;
10643
10644                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10645
10646                 if (!(phydev->supported & SUPPORTED_Pause) ||
10647                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10648                      (epause->rx_pause != epause->tx_pause)))
10649                         return -EINVAL;
10650
10651                 tp->link_config.flowctrl = 0;
10652                 if (epause->rx_pause) {
10653                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10654
10655                         if (epause->tx_pause) {
10656                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10657                                 newadv = ADVERTISED_Pause;
10658                         } else
10659                                 newadv = ADVERTISED_Pause |
10660                                          ADVERTISED_Asym_Pause;
10661                 } else if (epause->tx_pause) {
10662                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10663                         newadv = ADVERTISED_Asym_Pause;
10664                 } else
10665                         newadv = 0;
10666
10667                 if (epause->autoneg)
10668                         tg3_flag_set(tp, PAUSE_AUTONEG);
10669                 else
10670                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10671
10672                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10673                         u32 oldadv = phydev->advertising &
10674                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10675                         if (oldadv != newadv) {
10676                                 phydev->advertising &=
10677                                         ~(ADVERTISED_Pause |
10678                                           ADVERTISED_Asym_Pause);
10679                                 phydev->advertising |= newadv;
10680                                 if (phydev->autoneg) {
10681                                         /*
10682                                          * Always renegotiate the link to
10683                                          * inform our link partner of our
10684                                          * flow control settings, even if the
10685                                          * flow control is forced.  Let
10686                                          * tg3_adjust_link() do the final
10687                                          * flow control setup.
10688                                          */
10689                                         return phy_start_aneg(phydev);
10690                                 }
10691                         }
10692
10693                         if (!epause->autoneg)
10694                                 tg3_setup_flow_control(tp, 0, 0);
10695                 } else {
10696                         tp->link_config.orig_advertising &=
10697                                         ~(ADVERTISED_Pause |
10698                                           ADVERTISED_Asym_Pause);
10699                         tp->link_config.orig_advertising |= newadv;
10700                 }
10701         } else {
10702                 int irq_sync = 0;
10703
10704                 if (netif_running(dev)) {
10705                         tg3_netif_stop(tp);
10706                         irq_sync = 1;
10707                 }
10708
10709                 tg3_full_lock(tp, irq_sync);
10710
10711                 if (epause->autoneg)
10712                         tg3_flag_set(tp, PAUSE_AUTONEG);
10713                 else
10714                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10715                 if (epause->rx_pause)
10716                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10717                 else
10718                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10719                 if (epause->tx_pause)
10720                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10721                 else
10722                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10723
10724                 if (netif_running(dev)) {
10725                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10726                         err = tg3_restart_hw(tp, 1);
10727                         if (!err)
10728                                 tg3_netif_start(tp);
10729                 }
10730
10731                 tg3_full_unlock(tp);
10732         }
10733
10734         return err;
10735 }
10736
10737 static int tg3_get_sset_count(struct net_device *dev, int sset)
10738 {
10739         switch (sset) {
10740         case ETH_SS_TEST:
10741                 return TG3_NUM_TEST;
10742         case ETH_SS_STATS:
10743                 return TG3_NUM_STATS;
10744         default:
10745                 return -EOPNOTSUPP;
10746         }
10747 }
10748
10749 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10750 {
10751         switch (stringset) {
10752         case ETH_SS_STATS:
10753                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10754                 break;
10755         case ETH_SS_TEST:
10756                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10757                 break;
10758         default:
10759                 WARN_ON(1);     /* unknown string set; should never happen */
10760                 break;
10761         }
10762 }
10763
10764 static int tg3_set_phys_id(struct net_device *dev,
10765                             enum ethtool_phys_id_state state)
10766 {
10767         struct tg3 *tp = netdev_priv(dev);
10768
10769         if (!netif_running(tp->dev))
10770                 return -EAGAIN;
10771
10772         switch (state) {
10773         case ETHTOOL_ID_ACTIVE:
10774                 return 1;       /* cycle on/off once per second */
10775
10776         case ETHTOOL_ID_ON:
10777                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10778                      LED_CTRL_1000MBPS_ON |
10779                      LED_CTRL_100MBPS_ON |
10780                      LED_CTRL_10MBPS_ON |
10781                      LED_CTRL_TRAFFIC_OVERRIDE |
10782                      LED_CTRL_TRAFFIC_BLINK |
10783                      LED_CTRL_TRAFFIC_LED);
10784                 break;
10785
10786         case ETHTOOL_ID_OFF:
10787                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10788                      LED_CTRL_TRAFFIC_OVERRIDE);
10789                 break;
10790
10791         case ETHTOOL_ID_INACTIVE:
10792                 tw32(MAC_LED_CTRL, tp->led_ctrl);
10793                 break;
10794         }
10795
10796         return 0;
10797 }
10798
10799 static void tg3_get_ethtool_stats(struct net_device *dev,
10800                                    struct ethtool_stats *estats, u64 *tmp_stats)
10801 {
10802         struct tg3 *tp = netdev_priv(dev);
10803         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10804 }
10805
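/* Read the adapter's VPD block into a freshly allocated buffer, returning
 * its length through *vpdlen.  If the NVRAM directory contains an
 * extended-VPD entry, that region is used; otherwise the fixed default
 * location.  Devices without a usable EEPROM image fall back to the PCI
 * VPD capability, making up to three attempts on timeouts or signals.
 */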
10806 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
10807 {
10808         int i;
10809         __be32 *buf;
10810         u32 offset = 0, len = 0;
10811         u32 magic, val;
10812
10813         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
10814                 return NULL;
10815
10816         if (magic == TG3_EEPROM_MAGIC) {
10817                 for (offset = TG3_NVM_DIR_START;
10818                      offset < TG3_NVM_DIR_END;
10819                      offset += TG3_NVM_DIRENT_SIZE) {
10820                         if (tg3_nvram_read(tp, offset, &val))
10821                                 return NULL;
10822
10823                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
10824                             TG3_NVM_DIRTYPE_EXTVPD)
10825                                 break;
10826                 }
10827
10828                 if (offset != TG3_NVM_DIR_END) {
10829                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
10830                         if (tg3_nvram_read(tp, offset + 4, &offset))
10831                                 return NULL;
10832
10833                         offset = tg3_nvram_logical_addr(tp, offset);
10834                 }
10835         }
10836
10837         if (!offset || !len) {
10838                 offset = TG3_NVM_VPD_OFF;
10839                 len = TG3_NVM_VPD_LEN;
10840         }
10841
10842         buf = kmalloc(len, GFP_KERNEL);
10843         if (buf == NULL)
10844                 return NULL;
10845
10846         if (magic == TG3_EEPROM_MAGIC) {
10847                 for (i = 0; i < len; i += 4) {
10848                         /* The data is in little-endian format in NVRAM.
10849                          * Use the big-endian read routines to preserve
10850                          * the byte order as it exists in NVRAM.
10851                          */
10852                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
10853                                 goto error;
10854                 }
10855         } else {
10856                 u8 *ptr;
10857                 ssize_t cnt;
10858                 unsigned int pos = 0;
10859
10860                 ptr = (u8 *)&buf[0];
10861                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
10862                         cnt = pci_read_vpd(tp->pdev, pos,
10863                                            len - pos, ptr);
10864                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
10865                                 cnt = 0;
10866                         else if (cnt < 0)
10867                                 goto error;
10868                 }
10869                 if (pos != len)
10870                         goto error;
10871         }
10872
10873         *vpdlen = len;
10874
10875         return buf;
10876
10877 error:
10878         kfree(buf);
10879         return NULL;
10880 }
10881
10882 #define NVRAM_TEST_SIZE 0x100
10883 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
10884 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
10885 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
10886 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
10887 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
10888 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
10889 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10890 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10891
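/* 'ethtool -t' NVRAM test: size the image from its magic number and
 * format revision, read it in, then verify the format-specific integrity
 * data (a byte checksum for selfboot images, parity bits for the
 * hardware selfboot format, CRC-32 values for legacy images) plus the
 * VPD read-only checksum when one is present.
 */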
10892 static int tg3_test_nvram(struct tg3 *tp)
10893 {
10894         u32 csum, magic, len;
10895         __be32 *buf;
10896         int i, j, k, err = 0, size;
10897
10898         if (tg3_flag(tp, NO_NVRAM))
10899                 return 0;
10900
10901         if (tg3_nvram_read(tp, 0, &magic) != 0)
10902                 return -EIO;
10903
10904         if (magic == TG3_EEPROM_MAGIC)
10905                 size = NVRAM_TEST_SIZE;
10906         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10907                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10908                     TG3_EEPROM_SB_FORMAT_1) {
10909                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10910                         case TG3_EEPROM_SB_REVISION_0:
10911                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10912                                 break;
10913                         case TG3_EEPROM_SB_REVISION_2:
10914                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10915                                 break;
10916                         case TG3_EEPROM_SB_REVISION_3:
10917                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10918                                 break;
10919                         case TG3_EEPROM_SB_REVISION_4:
10920                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
10921                                 break;
10922                         case TG3_EEPROM_SB_REVISION_5:
10923                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
10924                                 break;
10925                         case TG3_EEPROM_SB_REVISION_6:
10926                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
10927                                 break;
10928                         default:
10929                                 return -EIO;
10930                         }
10931                 } else
10932                         return 0;
10933         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10934                 size = NVRAM_SELFBOOT_HW_SIZE;
10935         else
10936                 return -EIO;
10937
10938         buf = kmalloc(size, GFP_KERNEL);
10939         if (buf == NULL)
10940                 return -ENOMEM;
10941
10942         err = -EIO;
10943         for (i = 0, j = 0; i < size; i += 4, j++) {
10944                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10945                 if (err)
10946                         break;
10947         }
10948         if (i < size)
10949                 goto out;
10950
10951         /* Selfboot format */
10952         magic = be32_to_cpu(buf[0]);
10953         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10954             TG3_EEPROM_MAGIC_FW) {
10955                 u8 *buf8 = (u8 *) buf, csum8 = 0;
10956
10957                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10958                     TG3_EEPROM_SB_REVISION_2) {
10959                         /* For rev 2, the csum doesn't include the MBA. */
10960                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10961                                 csum8 += buf8[i];
10962                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10963                                 csum8 += buf8[i];
10964                 } else {
10965                         for (i = 0; i < size; i++)
10966                                 csum8 += buf8[i];
10967                 }
10968
10969                 if (csum8 == 0) {
10970                         err = 0;
10971                         goto out;
10972                 }
10973
10974                 err = -EIO;
10975                 goto out;
10976         }
10977
10978         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10979             TG3_EEPROM_MAGIC_HW) {
10980                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10981                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10982                 u8 *buf8 = (u8 *) buf;
10983
10984                 /* Separate the parity bits and the data bytes.  */
10985                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10986                         if ((i == 0) || (i == 8)) {
10987                                 int l;
10988                                 u8 msk;
10989
10990                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10991                                         parity[k++] = buf8[i] & msk;
10992                                 i++;
10993                         } else if (i == 16) {
10994                                 int l;
10995                                 u8 msk;
10996
10997                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10998                                         parity[k++] = buf8[i] & msk;
10999                                 i++;
11000
11001                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11002                                         parity[k++] = buf8[i] & msk;
11003                                 i++;
11004                         }
11005                         data[j++] = buf8[i];
11006                 }
11007
11008                 err = -EIO;
11009                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11010                         u8 hw8 = hweight8(data[i]);
11011
11012                         if ((hw8 & 0x1) && parity[i])
11013                                 goto out;
11014                         else if (!(hw8 & 0x1) && !parity[i])
11015                                 goto out;
11016                 }
11017                 err = 0;
11018                 goto out;
11019         }
11020
11021         err = -EIO;
11022
11023         /* Bootstrap checksum at offset 0x10 */
11024         csum = calc_crc((unsigned char *) buf, 0x10);
11025         if (csum != le32_to_cpu(buf[0x10/4]))
11026                 goto out;
11027
11028         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11029         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11030         if (csum != le32_to_cpu(buf[0xfc/4]))
11031                 goto out;
11032
11033         kfree(buf);
11034
11035         buf = tg3_vpd_readblock(tp, &len);
11036         if (!buf)
11037                 return -ENOMEM;
11038
11039         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11040         if (i > 0) {
11041                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11042                 if (j < 0)
11043                         goto out;
11044
11045                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11046                         goto out;
11047
11048                 i += PCI_VPD_LRDT_TAG_SIZE;
11049                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11050                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11051                 if (j > 0) {
11052                         u8 csum8 = 0;
11053
11054                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11055
11056                         for (i = 0; i <= j; i++)
11057                                 csum8 += ((u8 *)buf)[i];
11058
11059                         if (csum8)
11060                                 goto out;
11061                 }
11062         }
11063
11064         err = 0;
11065
11066 out:
11067         kfree(buf);
11068         return err;
11069 }
11070
11071 #define TG3_SERDES_TIMEOUT_SEC  2
11072 #define TG3_COPPER_TIMEOUT_SEC  6
11073
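/* Poll once a second for carrier, allowing copper links more time to
 * autonegotiate than SerDes links.
 */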
11074 static int tg3_test_link(struct tg3 *tp)
11075 {
11076         int i, max;
11077
11078         if (!netif_running(tp->dev))
11079                 return -ENODEV;
11080
11081         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11082                 max = TG3_SERDES_TIMEOUT_SEC;
11083         else
11084                 max = TG3_COPPER_TIMEOUT_SEC;
11085
11086         for (i = 0; i < max; i++) {
11087                 if (netif_carrier_ok(tp->dev))
11088                         return 0;
11089
11090                 if (msleep_interruptible(1000))
11091                         break;
11092         }
11093
11094         return -EIO;
11095 }
11096
11097 /* Only test the commonly used registers */
11098 static int tg3_test_registers(struct tg3 *tp)
11099 {
11100         int i, is_5705, is_5750;
11101         u32 offset, read_mask, write_mask, val, save_val, read_val;
11102         static struct {
11103                 u16 offset;
11104                 u16 flags;
11105 #define TG3_FL_5705     0x1
11106 #define TG3_FL_NOT_5705 0x2
11107 #define TG3_FL_NOT_5788 0x4
11108 #define TG3_FL_NOT_5750 0x8
11109                 u32 read_mask;
11110                 u32 write_mask;
11111         } reg_tbl[] = {
11112                 /* MAC Control Registers */
11113                 { MAC_MODE, TG3_FL_NOT_5705,
11114                         0x00000000, 0x00ef6f8c },
11115                 { MAC_MODE, TG3_FL_5705,
11116                         0x00000000, 0x01ef6b8c },
11117                 { MAC_STATUS, TG3_FL_NOT_5705,
11118                         0x03800107, 0x00000000 },
11119                 { MAC_STATUS, TG3_FL_5705,
11120                         0x03800100, 0x00000000 },
11121                 { MAC_ADDR_0_HIGH, 0x0000,
11122                         0x00000000, 0x0000ffff },
11123                 { MAC_ADDR_0_LOW, 0x0000,
11124                         0x00000000, 0xffffffff },
11125                 { MAC_RX_MTU_SIZE, 0x0000,
11126                         0x00000000, 0x0000ffff },
11127                 { MAC_TX_MODE, 0x0000,
11128                         0x00000000, 0x00000070 },
11129                 { MAC_TX_LENGTHS, 0x0000,
11130                         0x00000000, 0x00003fff },
11131                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11132                         0x00000000, 0x000007fc },
11133                 { MAC_RX_MODE, TG3_FL_5705,
11134                         0x00000000, 0x000007dc },
11135                 { MAC_HASH_REG_0, 0x0000,
11136                         0x00000000, 0xffffffff },
11137                 { MAC_HASH_REG_1, 0x0000,
11138                         0x00000000, 0xffffffff },
11139                 { MAC_HASH_REG_2, 0x0000,
11140                         0x00000000, 0xffffffff },
11141                 { MAC_HASH_REG_3, 0x0000,
11142                         0x00000000, 0xffffffff },
11143
11144                 /* Receive Data and Receive BD Initiator Control Registers. */
11145                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11146                         0x00000000, 0xffffffff },
11147                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11148                         0x00000000, 0xffffffff },
11149                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11150                         0x00000000, 0x00000003 },
11151                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11152                         0x00000000, 0xffffffff },
11153                 { RCVDBDI_STD_BD+0, 0x0000,
11154                         0x00000000, 0xffffffff },
11155                 { RCVDBDI_STD_BD+4, 0x0000,
11156                         0x00000000, 0xffffffff },
11157                 { RCVDBDI_STD_BD+8, 0x0000,
11158                         0x00000000, 0xffff0002 },
11159                 { RCVDBDI_STD_BD+0xc, 0x0000,
11160                         0x00000000, 0xffffffff },
11161
11162                 /* Receive BD Initiator Control Registers. */
11163                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11164                         0x00000000, 0xffffffff },
11165                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11166                         0x00000000, 0x000003ff },
11167                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11168                         0x00000000, 0xffffffff },
11169
11170                 /* Host Coalescing Control Registers. */
11171                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11172                         0x00000000, 0x00000004 },
11173                 { HOSTCC_MODE, TG3_FL_5705,
11174                         0x00000000, 0x000000f6 },
11175                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11176                         0x00000000, 0xffffffff },
11177                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11178                         0x00000000, 0x000003ff },
11179                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11180                         0x00000000, 0xffffffff },
11181                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11182                         0x00000000, 0x000003ff },
11183                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11184                         0x00000000, 0xffffffff },
11185                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11186                         0x00000000, 0x000000ff },
11187                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11188                         0x00000000, 0xffffffff },
11189                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11190                         0x00000000, 0x000000ff },
11191                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11192                         0x00000000, 0xffffffff },
11193                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11194                         0x00000000, 0xffffffff },
11195                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11196                         0x00000000, 0xffffffff },
11197                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11198                         0x00000000, 0x000000ff },
11199                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11200                         0x00000000, 0xffffffff },
11201                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11202                         0x00000000, 0x000000ff },
11203                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11204                         0x00000000, 0xffffffff },
11205                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11206                         0x00000000, 0xffffffff },
11207                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11208                         0x00000000, 0xffffffff },
11209                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11210                         0x00000000, 0xffffffff },
11211                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11212                         0x00000000, 0xffffffff },
11213                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11214                         0xffffffff, 0x00000000 },
11215                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11216                         0xffffffff, 0x00000000 },
11217
11218                 /* Buffer Manager Control Registers. */
11219                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11220                         0x00000000, 0x007fff80 },
11221                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11222                         0x00000000, 0x007fffff },
11223                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11224                         0x00000000, 0x0000003f },
11225                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11226                         0x00000000, 0x000001ff },
11227                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11228                         0x00000000, 0x000001ff },
11229                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11230                         0xffffffff, 0x00000000 },
11231                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11232                         0xffffffff, 0x00000000 },
11233
11234                 /* Mailbox Registers */
11235                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11236                         0x00000000, 0x000001ff },
11237                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11238                         0x00000000, 0x000001ff },
11239                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11240                         0x00000000, 0x000007ff },
11241                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11242                         0x00000000, 0x000001ff },
11243
11244                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11245         };
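
        /* Reader's note on the table semantics, inferred from the loop
         * below: read_mask selects read-only bits whose original value
         * must survive every write, and write_mask selects read/write
         * bits that must accept both all-zeros and all-ones.  For example,
         * { MAC_ADDR_0_HIGH, 0x0000, 0x00000000, 0x0000ffff } expects the
         * low 16 bits to read back exactly as written and all other bits
         * to stay zero.
         */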

        is_5705 = is_5750 = 0;
        if (tg3_flag(tp, 5705_PLUS)) {
                is_5705 = 1;
                if (tg3_flag(tp, 5750_PLUS))
                        is_5750 = 1;
        }

        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
                        continue;

                if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
                        continue;

                if (tg3_flag(tp, IS_5788) &&
                    (reg_tbl[i].flags & TG3_FL_NOT_5788))
                        continue;

                if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
                        continue;

                offset = (u32) reg_tbl[i].offset;
                read_mask = reg_tbl[i].read_mask;
                write_mask = reg_tbl[i].write_mask;

                /* Save the original register content */
                save_val = tr32(offset);

                /* Determine the read-only value. */
                read_val = save_val & read_mask;

                /* Write zero to the register, then make sure the read-only bits
                 * are not changed and the read/write bits are all zeros.
                 */
                tw32(offset, 0);

                val = tr32(offset);

                /* Test the read-only and read/write bits. */
                if (((val & read_mask) != read_val) || (val & write_mask))
                        goto out;

                /* Write ones to all the bits defined by RdMask and WrMask, then
                 * make sure the read-only bits are not changed and the
                 * read/write bits are all ones.
                 */
                tw32(offset, read_mask | write_mask);

                val = tr32(offset);

                /* Test the read-only bits. */
                if ((val & read_mask) != read_val)
                        goto out;

                /* Test the read/write bits. */
                if ((val & write_mask) != write_mask)
                        goto out;

                tw32(offset, save_val);
        }

        return 0;

out:
        if (netif_msg_hw(tp))
                netdev_err(tp->dev,
                           "Register test failed at offset %x\n", offset);
        tw32(offset, save_val);
        return -EIO;
}

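/* A note on the pattern choice below: all-zeros and all-ones catch bits
 * stuck at either level, while the mixed 0xaa55a55a word flips neighboring
 * bits and so can expose adjacent bits shorted together, which the two
 * uniform patterns alone would miss.
 */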
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
        static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
        int i;
        u32 j;

        for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
                for (j = 0; j < len; j += 4) {
                        u32 val;

                        tg3_write_mem(tp, offset + j, test_pattern[i]);
                        tg3_read_mem(tp, offset + j, &val);
                        if (val != test_pattern[i])
                                return -EIO;
                }
        }
        return 0;
}

static int tg3_test_memory(struct tg3 *tp)
{
        static struct mem_entry {
                u32 offset;
                u32 len;
        } mem_tbl_570x[] = {
                { 0x00000000, 0x00b50},
                { 0x00002000, 0x1c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5705[] = {
                { 0x00000100, 0x0000c},
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x01000},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0e000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5755[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x00800},
                { 0x00008000, 0x02000},
                { 0x00010000, 0x0c000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5906[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00400},
                { 0x00006000, 0x00400},
                { 0x00008000, 0x01000},
                { 0x00010000, 0x01000},
                { 0xffffffff, 0x00000}
        }, mem_tbl_5717[] = {
                { 0x00000200, 0x00008},
                { 0x00010000, 0x0a000},
                { 0x00020000, 0x13c00},
                { 0xffffffff, 0x00000}
        }, mem_tbl_57765[] = {
                { 0x00000200, 0x00008},
                { 0x00004000, 0x00800},
                { 0x00006000, 0x09800},
                { 0x00010000, 0x0a000},
                { 0xffffffff, 0x00000}
        };
        struct mem_entry *mem_tbl;
        int err = 0;
        int i;

        if (tg3_flag(tp, 5717_PLUS))
                mem_tbl = mem_tbl_5717;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                mem_tbl = mem_tbl_57765;
        else if (tg3_flag(tp, 5755_PLUS))
                mem_tbl = mem_tbl_5755;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
                mem_tbl = mem_tbl_5906;
        else if (tg3_flag(tp, 5705_PLUS))
                mem_tbl = mem_tbl_5705;
        else
                mem_tbl = mem_tbl_570x;

        for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
                err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
                if (err)
                        break;
        }

        return err;
}

#define TG3_TSO_MSS             500

#define TG3_TSO_IP_HDR_LEN      20
#define TG3_TSO_TCP_HDR_LEN     20
#define TG3_TSO_TCP_OPT_LEN     12

static const u8 tg3_tso_header[] = {
0x08, 0x00,
0x45, 0x00, 0x00, 0x00,
0x00, 0x00, 0x40, 0x00,
0x40, 0x06, 0x00, 0x00,
0x0a, 0x00, 0x00, 0x01,
0x0a, 0x00, 0x00, 0x02,
0x0d, 0x00, 0xe0, 0x00,
0x00, 0x00, 0x01, 0x00,
0x00, 0x00, 0x02, 0x00,
0x80, 0x10, 0x10, 0x00,
0x14, 0x09, 0x00, 0x00,
0x01, 0x01, 0x08, 0x0a,
0x11, 0x11, 0x11, 0x11,
0x11, 0x11, 0x11, 0x11,
};
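
/* Decoded for the reader's convenience (field values read straight off the
 * bytes above): EtherType 0x0800 (IPv4); an IPv4 header with IHL 5, DF set,
 * TTL 64, protocol 6 (TCP), src 10.0.0.1, dst 10.0.0.2, and tot_len/checksum
 * left zero to be filled in later; then a TCP header with data offset 8
 * (20 bytes plus 12 bytes of options), ACK set, window 0x1000, a pre-set
 * checksum word, and a 12-byte option block (NOP, NOP, timestamp) whose
 * timestamp fields are 0x11 placeholder bytes.
 */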

static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
{
        u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
        u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
        u32 budget;
        struct sk_buff *skb, *rx_skb;
        u8 *tx_data;
        dma_addr_t map;
        int num_pkts, tx_len, rx_len, i, err;
        struct tg3_rx_buffer_desc *desc;
        struct tg3_napi *tnapi, *rnapi;
        struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

        tnapi = &tp->napi[0];
        rnapi = &tp->napi[0];
        if (tp->irq_cnt > 1) {
                if (tg3_flag(tp, ENABLE_RSS))
                        rnapi = &tp->napi[1];
                if (tg3_flag(tp, ENABLE_TSS))
                        tnapi = &tp->napi[1];
        }
        coal_now = tnapi->coal_now | rnapi->coal_now;

        err = -EIO;

        tx_len = pktsz;
        skb = netdev_alloc_skb(tp->dev, tx_len);
        if (!skb)
                return -ENOMEM;

        tx_data = skb_put(skb, tx_len);
        memcpy(tx_data, tp->dev->dev_addr, 6);
        memset(tx_data + 6, 0x0, 8);

        tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

        if (tso_loopback) {
                struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

                u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
                              TG3_TSO_TCP_OPT_LEN;

                memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
                       sizeof(tg3_tso_header));
                mss = TG3_TSO_MSS;

                val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
                num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

                /* Set the total length field in the IP header */
                iph->tot_len = htons((u16)(mss + hdr_len));

                base_flags = (TXD_FLAG_CPU_PRE_DMA |
                              TXD_FLAG_CPU_POST_DMA);

                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
                        struct tcphdr *th;
                        val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
                        th = (struct tcphdr *)&tx_data[val];
                        th->check = 0;
                } else
                        base_flags |= TXD_FLAG_TCPUDP_CSUM;

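                /* hdr_len is 52 bytes here (20 IP + 20 TCP + 12 options).
                 * The branches below fold it into the descriptor in the
                 * generation-specific layouts; reading the arithmetic as
                 * written (the hardware field names are not spelled out in
                 * this file): HW_TSO_3 spreads hdr_len across mss bits
                 * 14-15 and base_flags bits 4 and 10-14, HW_TSO_2 places
                 * the whole hdr_len at mss bit 9 and up, and the older
                 * parts encode only the TCP option length.
                 */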
                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                        if (hdr_len & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        mss |= (TG3_TSO_TCP_OPT_LEN << 9);
                } else {
                        base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
                }

                data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
        } else {
                num_pkts = 1;
                data_off = ETH_HLEN;
        }

        for (i = data_off; i < tx_len; i++)
                tx_data[i] = (u8) (i & 0xff);

        map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        }

        val = tnapi->tx_prod;
        tnapi->tx_buffers[val].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);

        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               rnapi->coal_now);

        udelay(10);

        rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

        budget = tg3_tx_avail(tnapi);
        if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
                            base_flags | TXD_FLAG_END, mss, 0)) {
                tnapi->tx_buffers[val].skb = NULL;
                dev_kfree_skb(skb);
                return -EIO;
        }

        tnapi->tx_prod++;

        tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
        tr32_mailbox(tnapi->prodmbox);

        udelay(10);

        /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
        for (i = 0; i < 35; i++) {
                tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
                       coal_now);

                udelay(10);

                tx_idx = tnapi->hw_status->idx[0].tx_consumer;
                rx_idx = rnapi->hw_status->idx[0].rx_producer;
                if ((tx_idx == tnapi->tx_prod) &&
                    (rx_idx == (rx_start_idx + num_pkts)))
                        break;
        }

        tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
        dev_kfree_skb(skb);

        if (tx_idx != tnapi->tx_prod)
                goto out;

        if (rx_idx != rx_start_idx + num_pkts)
                goto out;

        val = data_off;
        while (rx_idx != rx_start_idx) {
                desc = &rnapi->rx_rcb[rx_start_idx++];
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
                        goto out;

                rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
                         - ETH_FCS_LEN;

                if (!tso_loopback) {
                        if (rx_len != tx_len)
                                goto out;

                        if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
                                if (opaque_key != RXD_OPAQUE_RING_STD)
                                        goto out;
                        } else {
                                if (opaque_key != RXD_OPAQUE_RING_JUMBO)
                                        goto out;
                        }
                } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
                           (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
                            >> RXD_TCPCSUM_SHIFT != 0xffff) {
                        goto out;
                }

                if (opaque_key == RXD_OPAQUE_RING_STD) {
                        rx_skb = tpr->rx_std_buffers[desc_idx].skb;
                        map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
                                             mapping);
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
                        rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
                        map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
                                             mapping);
                } else
                        goto out;

                pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
                                            PCI_DMA_FROMDEVICE);

                for (i = data_off; i < rx_len; i++, val++) {
                        if (*(rx_skb->data + i) != (u8) (val & 0xff))
                                goto out;
                }
        }

        err = 0;

        /* tg3_free_rings will unmap and free the rx_skb */
out:
        return err;
}

#define TG3_STD_LOOPBACK_FAILED         1
#define TG3_JMB_LOOPBACK_FAILED         2
#define TG3_TSO_LOOPBACK_FAILED         4
#define TG3_LOOPBACK_FAILED \
        (TG3_STD_LOOPBACK_FAILED | \
         TG3_JMB_LOOPBACK_FAILED | \
         TG3_TSO_LOOPBACK_FAILED)
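
/* In tg3_test_loopback() below the three u64 result slots collect these
 * bit flags per loopback mode: data[0] for MAC loopback, data[1] for
 * internal PHY loopback, and data[2] for external (cable) loopback when
 * it is requested.
 */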

static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
{
        int err = -EIO;
        u32 eee_cap;

        eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
        tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

        if (!netif_running(tp->dev)) {
                data[0] = TG3_LOOPBACK_FAILED;
                data[1] = TG3_LOOPBACK_FAILED;
                if (do_extlpbk)
                        data[2] = TG3_LOOPBACK_FAILED;
                goto done;
        }

        err = tg3_reset_hw(tp, 1);
        if (err) {
                data[0] = TG3_LOOPBACK_FAILED;
                data[1] = TG3_LOOPBACK_FAILED;
                if (do_extlpbk)
                        data[2] = TG3_LOOPBACK_FAILED;
                goto done;
        }

        if (tg3_flag(tp, ENABLE_RSS)) {
                int i;

                /* Reroute all rx packets to the 1st queue */
                for (i = MAC_RSS_INDIR_TBL_0;
                     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
                        tw32(i, 0x0);
        }

        /* HW errata - mac loopback fails in some cases on 5780.
         * Normal traffic and PHY loopback are not affected by
         * the errata.  Also, the MAC loopback test is deprecated for
         * all newer ASIC revisions.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
            !tg3_flag(tp, CPMU_PRESENT)) {
                tg3_mac_loopback(tp, true);

                if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                        data[0] |= TG3_STD_LOOPBACK_FAILED;

                if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
                        data[0] |= TG3_JMB_LOOPBACK_FAILED;

                tg3_mac_loopback(tp, false);
        }

        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            !tg3_flag(tp, USE_PHYLIB)) {
                int i;

                tg3_phy_lpbk_set(tp, 0, false);

                /* Wait for link */
                for (i = 0; i < 100; i++) {
                        if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
                                break;
                        mdelay(1);
                }

                if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                        data[1] |= TG3_STD_LOOPBACK_FAILED;
                if (tg3_flag(tp, TSO_CAPABLE) &&
                    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
                        data[1] |= TG3_TSO_LOOPBACK_FAILED;
                if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                    tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
                        data[1] |= TG3_JMB_LOOPBACK_FAILED;

                if (do_extlpbk) {
                        tg3_phy_lpbk_set(tp, 0, true);

                        /* All link indications report up, but the hardware
                         * isn't really ready for about 20 msec.  Double it
                         * to be sure.
                         */
                        mdelay(40);

                        if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
                                data[2] |= TG3_STD_LOOPBACK_FAILED;
                        if (tg3_flag(tp, TSO_CAPABLE) &&
                            tg3_run_loopback(tp, ETH_FRAME_LEN, true))
                                data[2] |= TG3_TSO_LOOPBACK_FAILED;
                        if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
                            tg3_run_loopback(tp, 9000 + ETH_HLEN, false))
                                data[2] |= TG3_JMB_LOOPBACK_FAILED;
                }

                /* Re-enable gphy autopowerdown. */
                if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
                        tg3_phy_toggle_apd(tp, true);
        }

        err = (data[0] | data[1] | data[2]) ? -EIO : 0;

done:
        tp->phy_flags |= eee_cap;

        return err;
}

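/* Results land in the ethtool self-test array in the order filled in below:
 * data[0] nvram, data[1] link, data[2] registers, data[3] memory,
 * data[4]-data[6] the loopback bitmaps, data[7] interrupt.  From userspace
 * the whole sequence is driven by, e.g., "ethtool -t eth0 offline"
 * (interface name is only an example).
 */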
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
                          u64 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;

        if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
            tg3_power_up(tp)) {
                etest->flags |= ETH_TEST_FL_FAILED;
                memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
                return;
        }

        memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

        if (tg3_test_nvram(tp) != 0) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[0] = 1;
        }
        if (!doextlpbk && tg3_test_link(tp)) {
                etest->flags |= ETH_TEST_FL_FAILED;
                data[1] = 1;
        }
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int err, err2 = 0, irq_sync = 0;

                if (netif_running(dev)) {
                        tg3_phy_stop(tp);
                        tg3_netif_stop(tp);
                        irq_sync = 1;
                }

                tg3_full_lock(tp, irq_sync);

                tg3_halt(tp, RESET_KIND_SUSPEND, 1);
                err = tg3_nvram_lock(tp);
                tg3_halt_cpu(tp, RX_CPU_BASE);
                if (!tg3_flag(tp, 5705_PLUS))
                        tg3_halt_cpu(tp, TX_CPU_BASE);
                if (!err)
                        tg3_nvram_unlock(tp);

                if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
                        tg3_phy_reset(tp);

                if (tg3_test_registers(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[2] = 1;
                }

                if (tg3_test_memory(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[3] = 1;
                }

                if (doextlpbk)
                        etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;

                if (tg3_test_loopback(tp, &data[4], doextlpbk))
                        etest->flags |= ETH_TEST_FL_FAILED;

                tg3_full_unlock(tp);

                if (tg3_test_interrupt(tp) != 0) {
                        etest->flags |= ETH_TEST_FL_FAILED;
                        data[7] = 1;
                }

                tg3_full_lock(tp, 0);

                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                if (netif_running(dev)) {
                        tg3_flag_set(tp, INIT_COMPLETE);
                        err2 = tg3_restart_hw(tp, 1);
                        if (!err2)
                                tg3_netif_start(tp);
                }

                tg3_full_unlock(tp);

                if (irq_sync && !err2)
                        tg3_phy_start(tp);
        }
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                tg3_power_down(tp);
}

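/* The MII ioctls below are what back generic userspace PHY tools (for
 * instance mii-tool, which issues SIOCGMIIPHY/SIOCGMIIREG); when the PHY
 * is managed by phylib the request is simply forwarded to it.
 */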
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct mii_ioctl_data *data = if_mii(ifr);
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_mii_ioctl(phydev, ifr, cmd);
        }

        switch (cmd) {
        case SIOCGMIIPHY:
                data->phy_id = tp->phy_addr;

                /* fallthru */
        case SIOCGMIIREG: {
                u32 mii_regval;

                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
                spin_unlock_bh(&tp->lock);

                data->val_out = mii_regval;

                return err;
        }

        case SIOCSMIIREG:
                if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                        break;                  /* We have no PHY */

                if (!netif_running(dev))
                        return -EAGAIN;

                spin_lock_bh(&tp->lock);
                err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
                spin_unlock_bh(&tp->lock);

                return err;

        default:
                /* do nothing */
                break;
        }
        return -EOPNOTSUPP;
}

static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
        struct tg3 *tp = netdev_priv(dev);

        memcpy(ec, &tp->coal, sizeof(*ec));
        return 0;
}

static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
        u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;

        if (!tg3_flag(tp, 5705_PLUS)) {
                max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
                max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
                max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
                min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
        }

        if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
            (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
            (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
            (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
            (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
            (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
            (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
            (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
            (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
            (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
                return -EINVAL;

        /* No rx interrupts will be generated if both are zero */
        if ((ec->rx_coalesce_usecs == 0) &&
            (ec->rx_max_coalesced_frames == 0))
                return -EINVAL;

        /* No tx interrupts will be generated if both are zero */
        if ((ec->tx_coalesce_usecs == 0) &&
            (ec->tx_max_coalesced_frames == 0))
                return -EINVAL;

        /* Only copy relevant parameters, ignore all others. */
        tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
        tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
        tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
        tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
        tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
        tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
        tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
        tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
        tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;

        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_coalesce(tp, &tp->coal);
                tg3_full_unlock(tp);
        }
        return 0;
}
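
/* From userspace these map onto the standard ethtool coalescing knobs,
 * for example (interface name and values are illustrative only):
 *
 *      ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 *
 * rx-usecs/tx-usecs correspond to rx_coalesce_usecs/tx_coalesce_usecs and
 * rx-frames/tx-frames to the *_max_coalesced_frames fields above.
 */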

static const struct ethtool_ops tg3_ethtool_ops = {
        .get_settings           = tg3_get_settings,
        .set_settings           = tg3_set_settings,
        .get_drvinfo            = tg3_get_drvinfo,
        .get_regs_len           = tg3_get_regs_len,
        .get_regs               = tg3_get_regs,
        .get_wol                = tg3_get_wol,
        .set_wol                = tg3_set_wol,
        .get_msglevel           = tg3_get_msglevel,
        .set_msglevel           = tg3_set_msglevel,
        .nway_reset             = tg3_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_eeprom_len         = tg3_get_eeprom_len,
        .get_eeprom             = tg3_get_eeprom,
        .set_eeprom             = tg3_set_eeprom,
        .get_ringparam          = tg3_get_ringparam,
        .set_ringparam          = tg3_set_ringparam,
        .get_pauseparam         = tg3_get_pauseparam,
        .set_pauseparam         = tg3_set_pauseparam,
        .self_test              = tg3_self_test,
        .get_strings            = tg3_get_strings,
        .set_phys_id            = tg3_set_phys_id,
        .get_ethtool_stats      = tg3_get_ethtool_stats,
        .get_coalesce           = tg3_get_coalesce,
        .set_coalesce           = tg3_set_coalesce,
        .get_sset_count         = tg3_get_sset_count,
};

static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
        u32 cursize, val, magic;

        tp->nvram_size = EEPROM_CHIP_SIZE;

        if (tg3_nvram_read(tp, 0, &magic) != 0)
                return;

        if ((magic != TG3_EEPROM_MAGIC) &&
            ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
            ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
                return;

        /*
         * Size the chip by reading offsets at increasing powers of two.
         * When we encounter our validation signature, we know the addressing
         * has wrapped around, and thus have our chip size.
         */
        cursize = 0x10;

        while (cursize < tp->nvram_size) {
                if (tg3_nvram_read(tp, cursize, &val) != 0)
                        return;

                if (val == magic)
                        break;

                cursize <<= 1;
        }

        tp->nvram_size = cursize;
}
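
/* Worked example of the wraparound probe above (illustration only; the
 * offsets depend on the actual part): on a 128-byte device the reads at
 * 0x10, 0x20 and 0x40 return ordinary data, but the read at 0x80 wraps
 * back to offset 0 and returns the magic word, so the loop exits with
 * cursize == 0x80, the device size.
 */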

static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
        u32 val;

        if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
                return;

        /* Selfboot format */
        if (val != TG3_EEPROM_MAGIC) {
                tg3_get_eeprom_size(tp);
                return;
        }

        if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
                if (val != 0) {
                        /* This is confusing.  We want to operate on the
                         * 16-bit value at offset 0xf2.  The tg3_nvram_read()
                         * call will read from NVRAM and byteswap the data
                         * according to the byteswapping settings for all
                         * other register accesses.  This ensures the data we
                         * want will always reside in the lower 16-bits.
                         * However, the data in NVRAM is in LE format, which
                         * means the data from the NVRAM read will always be
                         * opposite the endianness of the CPU.  The 16-bit
                         * byteswap then brings the data to CPU endianness.
                         */
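                        /* Concretely (illustrative numbers): a stored size
                         * of 0x0400 KB arrives byte-swapped as 0x0004 in
                         * the low half of val; swab16() restores 0x0400,
                         * giving 0x0400 * 1024 bytes = 1 MB.
                         */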
12039                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12040                         return;
12041                 }
12042         }
12043         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12044 }
12045
12046 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12047 {
12048         u32 nvcfg1;
12049
12050         nvcfg1 = tr32(NVRAM_CFG1);
12051         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12052                 tg3_flag_set(tp, FLASH);
12053         } else {
12054                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12055                 tw32(NVRAM_CFG1, nvcfg1);
12056         }
12057
12058         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12059             tg3_flag(tp, 5780_CLASS)) {
12060                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12061                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12062                         tp->nvram_jedecnum = JEDEC_ATMEL;
12063                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12064                         tg3_flag_set(tp, NVRAM_BUFFERED);
12065                         break;
12066                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12067                         tp->nvram_jedecnum = JEDEC_ATMEL;
12068                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12069                         break;
12070                 case FLASH_VENDOR_ATMEL_EEPROM:
12071                         tp->nvram_jedecnum = JEDEC_ATMEL;
12072                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12073                         tg3_flag_set(tp, NVRAM_BUFFERED);
12074                         break;
12075                 case FLASH_VENDOR_ST:
12076                         tp->nvram_jedecnum = JEDEC_ST;
12077                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12078                         tg3_flag_set(tp, NVRAM_BUFFERED);
12079                         break;
12080                 case FLASH_VENDOR_SAIFUN:
12081                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12082                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12083                         break;
12084                 case FLASH_VENDOR_SST_SMALL:
12085                 case FLASH_VENDOR_SST_LARGE:
12086                         tp->nvram_jedecnum = JEDEC_SST;
12087                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12088                         break;
12089                 }
12090         } else {
12091                 tp->nvram_jedecnum = JEDEC_ATMEL;
12092                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12093                 tg3_flag_set(tp, NVRAM_BUFFERED);
12094         }
12095 }
12096
12097 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12098 {
12099         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12100         case FLASH_5752PAGE_SIZE_256:
12101                 tp->nvram_pagesize = 256;
12102                 break;
12103         case FLASH_5752PAGE_SIZE_512:
12104                 tp->nvram_pagesize = 512;
12105                 break;
12106         case FLASH_5752PAGE_SIZE_1K:
12107                 tp->nvram_pagesize = 1024;
12108                 break;
12109         case FLASH_5752PAGE_SIZE_2K:
12110                 tp->nvram_pagesize = 2048;
12111                 break;
12112         case FLASH_5752PAGE_SIZE_4K:
12113                 tp->nvram_pagesize = 4096;
12114                 break;
12115         case FLASH_5752PAGE_SIZE_264:
12116                 tp->nvram_pagesize = 264;
12117                 break;
12118         case FLASH_5752PAGE_SIZE_528:
12119                 tp->nvram_pagesize = 528;
12120                 break;
12121         }
12122 }
12123
12124 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12125 {
12126         u32 nvcfg1;
12127
12128         nvcfg1 = tr32(NVRAM_CFG1);
12129
12130         /* NVRAM protection for TPM */
12131         if (nvcfg1 & (1 << 27))
12132                 tg3_flag_set(tp, PROTECTED_NVRAM);
12133
12134         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12135         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12136         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12137                 tp->nvram_jedecnum = JEDEC_ATMEL;
12138                 tg3_flag_set(tp, NVRAM_BUFFERED);
12139                 break;
12140         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12141                 tp->nvram_jedecnum = JEDEC_ATMEL;
12142                 tg3_flag_set(tp, NVRAM_BUFFERED);
12143                 tg3_flag_set(tp, FLASH);
12144                 break;
12145         case FLASH_5752VENDOR_ST_M45PE10:
12146         case FLASH_5752VENDOR_ST_M45PE20:
12147         case FLASH_5752VENDOR_ST_M45PE40:
12148                 tp->nvram_jedecnum = JEDEC_ST;
12149                 tg3_flag_set(tp, NVRAM_BUFFERED);
12150                 tg3_flag_set(tp, FLASH);
12151                 break;
12152         }
12153
12154         if (tg3_flag(tp, FLASH)) {
12155                 tg3_nvram_get_pagesize(tp, nvcfg1);
12156         } else {
12157                 /* For eeprom, set pagesize to maximum eeprom size */
12158                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12159
12160                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12161                 tw32(NVRAM_CFG1, nvcfg1);
12162         }
12163 }
12164
12165 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12166 {
12167         u32 nvcfg1, protect = 0;
12168
12169         nvcfg1 = tr32(NVRAM_CFG1);
12170
12171         /* NVRAM protection for TPM */
12172         if (nvcfg1 & (1 << 27)) {
12173                 tg3_flag_set(tp, PROTECTED_NVRAM);
12174                 protect = 1;
12175         }
12176
12177         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12178         switch (nvcfg1) {
12179         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12180         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12181         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12182         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12183                 tp->nvram_jedecnum = JEDEC_ATMEL;
12184                 tg3_flag_set(tp, NVRAM_BUFFERED);
12185                 tg3_flag_set(tp, FLASH);
12186                 tp->nvram_pagesize = 264;
12187                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12188                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12189                         tp->nvram_size = (protect ? 0x3e200 :
12190                                           TG3_NVRAM_SIZE_512KB);
12191                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12192                         tp->nvram_size = (protect ? 0x1f200 :
12193                                           TG3_NVRAM_SIZE_256KB);
12194                 else
12195                         tp->nvram_size = (protect ? 0x1f200 :
12196                                           TG3_NVRAM_SIZE_128KB);
12197                 break;
12198         case FLASH_5752VENDOR_ST_M45PE10:
12199         case FLASH_5752VENDOR_ST_M45PE20:
12200         case FLASH_5752VENDOR_ST_M45PE40:
12201                 tp->nvram_jedecnum = JEDEC_ST;
12202                 tg3_flag_set(tp, NVRAM_BUFFERED);
12203                 tg3_flag_set(tp, FLASH);
12204                 tp->nvram_pagesize = 256;
12205                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12206                         tp->nvram_size = (protect ?
12207                                           TG3_NVRAM_SIZE_64KB :
12208                                           TG3_NVRAM_SIZE_128KB);
12209                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12210                         tp->nvram_size = (protect ?
12211                                           TG3_NVRAM_SIZE_64KB :
12212                                           TG3_NVRAM_SIZE_256KB);
12213                 else
12214                         tp->nvram_size = (protect ?
12215                                           TG3_NVRAM_SIZE_128KB :
12216                                           TG3_NVRAM_SIZE_512KB);
12217                 break;
12218         }
12219 }
12220
12221 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12222 {
12223         u32 nvcfg1;
12224
12225         nvcfg1 = tr32(NVRAM_CFG1);
12226
12227         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12228         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12229         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12230         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12231         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12232                 tp->nvram_jedecnum = JEDEC_ATMEL;
12233                 tg3_flag_set(tp, NVRAM_BUFFERED);
12234                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12235
12236                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12237                 tw32(NVRAM_CFG1, nvcfg1);
12238                 break;
12239         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12240         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12241         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12242         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12243                 tp->nvram_jedecnum = JEDEC_ATMEL;
12244                 tg3_flag_set(tp, NVRAM_BUFFERED);
12245                 tg3_flag_set(tp, FLASH);
12246                 tp->nvram_pagesize = 264;
12247                 break;
12248         case FLASH_5752VENDOR_ST_M45PE10:
12249         case FLASH_5752VENDOR_ST_M45PE20:
12250         case FLASH_5752VENDOR_ST_M45PE40:
12251                 tp->nvram_jedecnum = JEDEC_ST;
12252                 tg3_flag_set(tp, NVRAM_BUFFERED);
12253                 tg3_flag_set(tp, FLASH);
12254                 tp->nvram_pagesize = 256;
12255                 break;
12256         }
12257 }
12258
12259 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12260 {
12261         u32 nvcfg1, protect = 0;
12262
12263         nvcfg1 = tr32(NVRAM_CFG1);
12264
12265         /* NVRAM protection for TPM */
12266         if (nvcfg1 & (1 << 27)) {
12267                 tg3_flag_set(tp, PROTECTED_NVRAM);
12268                 protect = 1;
12269         }
12270
12271         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12272         switch (nvcfg1) {
12273         case FLASH_5761VENDOR_ATMEL_ADB021D:
12274         case FLASH_5761VENDOR_ATMEL_ADB041D:
12275         case FLASH_5761VENDOR_ATMEL_ADB081D:
12276         case FLASH_5761VENDOR_ATMEL_ADB161D:
12277         case FLASH_5761VENDOR_ATMEL_MDB021D:
12278         case FLASH_5761VENDOR_ATMEL_MDB041D:
12279         case FLASH_5761VENDOR_ATMEL_MDB081D:
12280         case FLASH_5761VENDOR_ATMEL_MDB161D:
12281                 tp->nvram_jedecnum = JEDEC_ATMEL;
12282                 tg3_flag_set(tp, NVRAM_BUFFERED);
12283                 tg3_flag_set(tp, FLASH);
12284                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12285                 tp->nvram_pagesize = 256;
12286                 break;
12287         case FLASH_5761VENDOR_ST_A_M45PE20:
12288         case FLASH_5761VENDOR_ST_A_M45PE40:
12289         case FLASH_5761VENDOR_ST_A_M45PE80:
12290         case FLASH_5761VENDOR_ST_A_M45PE16:
12291         case FLASH_5761VENDOR_ST_M_M45PE20:
12292         case FLASH_5761VENDOR_ST_M_M45PE40:
12293         case FLASH_5761VENDOR_ST_M_M45PE80:
12294         case FLASH_5761VENDOR_ST_M_M45PE16:
12295                 tp->nvram_jedecnum = JEDEC_ST;
12296                 tg3_flag_set(tp, NVRAM_BUFFERED);
12297                 tg3_flag_set(tp, FLASH);
12298                 tp->nvram_pagesize = 256;
12299                 break;
12300         }
12301
12302         if (protect) {
12303                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12304         } else {
12305                 switch (nvcfg1) {
12306                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12307                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12308                 case FLASH_5761VENDOR_ST_A_M45PE16:
12309                 case FLASH_5761VENDOR_ST_M_M45PE16:
12310                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12311                         break;
12312                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12313                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12314                 case FLASH_5761VENDOR_ST_A_M45PE80:
12315                 case FLASH_5761VENDOR_ST_M_M45PE80:
12316                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12317                         break;
12318                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12319                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12320                 case FLASH_5761VENDOR_ST_A_M45PE40:
12321                 case FLASH_5761VENDOR_ST_M_M45PE40:
12322                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12323                         break;
12324                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12325                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12326                 case FLASH_5761VENDOR_ST_A_M45PE20:
12327                 case FLASH_5761VENDOR_ST_M_M45PE20:
12328                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12329                         break;
12330                 }
12331         }
12332 }
12333
12334 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12335 {
12336         tp->nvram_jedecnum = JEDEC_ATMEL;
12337         tg3_flag_set(tp, NVRAM_BUFFERED);
12338         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12339 }
12340
12341 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12342 {
12343         u32 nvcfg1;
12344
12345         nvcfg1 = tr32(NVRAM_CFG1);
12346
12347         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12348         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12349         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12350                 tp->nvram_jedecnum = JEDEC_ATMEL;
12351                 tg3_flag_set(tp, NVRAM_BUFFERED);
12352                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12353
12354                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12355                 tw32(NVRAM_CFG1, nvcfg1);
12356                 return;
12357         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12358         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12359         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12360         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12361         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12362         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12363         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12364                 tp->nvram_jedecnum = JEDEC_ATMEL;
12365                 tg3_flag_set(tp, NVRAM_BUFFERED);
12366                 tg3_flag_set(tp, FLASH);
12367
12368                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12369                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12370                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12371                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12372                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12373                         break;
12374                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12375                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12376                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12377                         break;
12378                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12379                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12380                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12381                         break;
12382                 }
12383                 break;
12384         case FLASH_5752VENDOR_ST_M45PE10:
12385         case FLASH_5752VENDOR_ST_M45PE20:
12386         case FLASH_5752VENDOR_ST_M45PE40:
12387                 tp->nvram_jedecnum = JEDEC_ST;
12388                 tg3_flag_set(tp, NVRAM_BUFFERED);
12389                 tg3_flag_set(tp, FLASH);
12390
12391                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12392                 case FLASH_5752VENDOR_ST_M45PE10:
12393                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12394                         break;
12395                 case FLASH_5752VENDOR_ST_M45PE20:
12396                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12397                         break;
12398                 case FLASH_5752VENDOR_ST_M45PE40:
12399                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12400                         break;
12401                 }
12402                 break;
12403         default:
12404                 tg3_flag_set(tp, NO_NVRAM);
12405                 return;
12406         }
12407
12408         tg3_nvram_get_pagesize(tp, nvcfg1);
12409         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12410                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12411 }
12412
12413
12414 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12415 {
12416         u32 nvcfg1;
12417
12418         nvcfg1 = tr32(NVRAM_CFG1);
12419
12420         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12421         case FLASH_5717VENDOR_ATMEL_EEPROM:
12422         case FLASH_5717VENDOR_MICRO_EEPROM:
12423                 tp->nvram_jedecnum = JEDEC_ATMEL;
12424                 tg3_flag_set(tp, NVRAM_BUFFERED);
12425                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12426
12427                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12428                 tw32(NVRAM_CFG1, nvcfg1);
12429                 return;
12430         case FLASH_5717VENDOR_ATMEL_MDB011D:
12431         case FLASH_5717VENDOR_ATMEL_ADB011B:
12432         case FLASH_5717VENDOR_ATMEL_ADB011D:
12433         case FLASH_5717VENDOR_ATMEL_MDB021D:
12434         case FLASH_5717VENDOR_ATMEL_ADB021B:
12435         case FLASH_5717VENDOR_ATMEL_ADB021D:
12436         case FLASH_5717VENDOR_ATMEL_45USPT:
12437                 tp->nvram_jedecnum = JEDEC_ATMEL;
12438                 tg3_flag_set(tp, NVRAM_BUFFERED);
12439                 tg3_flag_set(tp, FLASH);
12440
12441                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12442                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12443                         /* Detect size with tg3_nvram_get_size() */
12444                         break;
12445                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12446                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12447                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12448                         break;
12449                 default:
12450                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12451                         break;
12452                 }
12453                 break;
12454         case FLASH_5717VENDOR_ST_M_M25PE10:
12455         case FLASH_5717VENDOR_ST_A_M25PE10:
12456         case FLASH_5717VENDOR_ST_M_M45PE10:
12457         case FLASH_5717VENDOR_ST_A_M45PE10:
12458         case FLASH_5717VENDOR_ST_M_M25PE20:
12459         case FLASH_5717VENDOR_ST_A_M25PE20:
12460         case FLASH_5717VENDOR_ST_M_M45PE20:
12461         case FLASH_5717VENDOR_ST_A_M45PE20:
12462         case FLASH_5717VENDOR_ST_25USPT:
12463         case FLASH_5717VENDOR_ST_45USPT:
12464                 tp->nvram_jedecnum = JEDEC_ST;
12465                 tg3_flag_set(tp, NVRAM_BUFFERED);
12466                 tg3_flag_set(tp, FLASH);
12467
12468                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12469                 case FLASH_5717VENDOR_ST_M_M25PE20:
12470                 case FLASH_5717VENDOR_ST_M_M45PE20:
12471                         /* Detect size with tg3_nvram_get_size() */
12472                         break;
12473                 case FLASH_5717VENDOR_ST_A_M25PE20:
12474                 case FLASH_5717VENDOR_ST_A_M45PE20:
12475                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12476                         break;
12477                 default:
12478                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12479                         break;
12480                 }
12481                 break;
12482         default:
12483                 tg3_flag_set(tp, NO_NVRAM);
12484                 return;
12485         }
12486
12487         tg3_nvram_get_pagesize(tp, nvcfg1);
12488         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12489                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12490 }
12491
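/* Same decode for the 5720: the pin-strap field selects among Atmel and
 * ST EEPROM and flash parts ranging from 128KB up to 1MB.
 */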
12492 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12493 {
12494         u32 nvcfg1, nvmpinstrp;
12495
12496         nvcfg1 = tr32(NVRAM_CFG1);
12497         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12498
12499         switch (nvmpinstrp) {
12500         case FLASH_5720_EEPROM_HD:
12501         case FLASH_5720_EEPROM_LD:
12502                 tp->nvram_jedecnum = JEDEC_ATMEL;
12503                 tg3_flag_set(tp, NVRAM_BUFFERED);
12504
12505                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12506                 tw32(NVRAM_CFG1, nvcfg1);
12507                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12508                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12509                 else
12510                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12511                 return;
12512         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12513         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12514         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12515         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12516         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12517         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12518         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12519         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12520         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12521         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12522         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12523         case FLASH_5720VENDOR_ATMEL_45USPT:
12524                 tp->nvram_jedecnum = JEDEC_ATMEL;
12525                 tg3_flag_set(tp, NVRAM_BUFFERED);
12526                 tg3_flag_set(tp, FLASH);
12527
12528                 switch (nvmpinstrp) {
12529                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12530                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12531                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12532                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12533                         break;
12534                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12535                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12536                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12537                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12538                         break;
12539                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12540                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12541                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12542                         break;
12543                 default:
12544                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12545                         break;
12546                 }
12547                 break;
12548         case FLASH_5720VENDOR_M_ST_M25PE10:
12549         case FLASH_5720VENDOR_M_ST_M45PE10:
12550         case FLASH_5720VENDOR_A_ST_M25PE10:
12551         case FLASH_5720VENDOR_A_ST_M45PE10:
12552         case FLASH_5720VENDOR_M_ST_M25PE20:
12553         case FLASH_5720VENDOR_M_ST_M45PE20:
12554         case FLASH_5720VENDOR_A_ST_M25PE20:
12555         case FLASH_5720VENDOR_A_ST_M45PE20:
12556         case FLASH_5720VENDOR_M_ST_M25PE40:
12557         case FLASH_5720VENDOR_M_ST_M45PE40:
12558         case FLASH_5720VENDOR_A_ST_M25PE40:
12559         case FLASH_5720VENDOR_A_ST_M45PE40:
12560         case FLASH_5720VENDOR_M_ST_M25PE80:
12561         case FLASH_5720VENDOR_M_ST_M45PE80:
12562         case FLASH_5720VENDOR_A_ST_M25PE80:
12563         case FLASH_5720VENDOR_A_ST_M45PE80:
12564         case FLASH_5720VENDOR_ST_25USPT:
12565         case FLASH_5720VENDOR_ST_45USPT:
12566                 tp->nvram_jedecnum = JEDEC_ST;
12567                 tg3_flag_set(tp, NVRAM_BUFFERED);
12568                 tg3_flag_set(tp, FLASH);
12569
12570                 switch (nvmpinstrp) {
12571                 case FLASH_5720VENDOR_M_ST_M25PE20:
12572                 case FLASH_5720VENDOR_M_ST_M45PE20:
12573                 case FLASH_5720VENDOR_A_ST_M25PE20:
12574                 case FLASH_5720VENDOR_A_ST_M45PE20:
12575                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12576                         break;
12577                 case FLASH_5720VENDOR_M_ST_M25PE40:
12578                 case FLASH_5720VENDOR_M_ST_M45PE40:
12579                 case FLASH_5720VENDOR_A_ST_M25PE40:
12580                 case FLASH_5720VENDOR_A_ST_M45PE40:
12581                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12582                         break;
12583                 case FLASH_5720VENDOR_M_ST_M25PE80:
12584                 case FLASH_5720VENDOR_M_ST_M45PE80:
12585                 case FLASH_5720VENDOR_A_ST_M25PE80:
12586                 case FLASH_5720VENDOR_A_ST_M45PE80:
12587                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12588                         break;
12589                 default:
12590                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12591                         break;
12592                 }
12593                 break;
12594         default:
12595                 tg3_flag_set(tp, NO_NVRAM);
12596                 return;
12597         }
12598
12599         tg3_nvram_get_pagesize(tp, nvcfg1);
12600         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12601                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12602 }
12603
12604 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12605 static void __devinit tg3_nvram_init(struct tg3 *tp)
12606 {
12607         tw32_f(GRC_EEPROM_ADDR,
12608              (EEPROM_ADDR_FSM_RESET |
12609               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12610                EEPROM_ADDR_CLKPERD_SHIFT)));
12611
12612         msleep(1);
12613
12614         /* Enable seeprom accesses. */
12615         tw32_f(GRC_LOCAL_CTRL,
12616              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12617         udelay(100);
12618
12619         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12620             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12621                 tg3_flag_set(tp, NVRAM);
12622
12623                 if (tg3_nvram_lock(tp)) {
12624                         netdev_warn(tp->dev,
12625                                     "Cannot get nvram lock, %s failed\n",
12626                                     __func__);
12627                         return;
12628                 }
12629                 tg3_enable_nvram_access(tp);
12630
12631                 tp->nvram_size = 0;
12632
12633                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12634                         tg3_get_5752_nvram_info(tp);
12635                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12636                         tg3_get_5755_nvram_info(tp);
12637                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12638                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12639                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12640                         tg3_get_5787_nvram_info(tp);
12641                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12642                         tg3_get_5761_nvram_info(tp);
12643                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12644                         tg3_get_5906_nvram_info(tp);
12645                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12646                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
12647                         tg3_get_57780_nvram_info(tp);
12648                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12649                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12650                         tg3_get_5717_nvram_info(tp);
12651                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12652                         tg3_get_5720_nvram_info(tp);
12653                 else
12654                         tg3_get_nvram_info(tp);
12655
12656                 if (tp->nvram_size == 0)
12657                         tg3_get_nvram_size(tp);
12658
12659                 tg3_disable_nvram_access(tp);
12660                 tg3_nvram_unlock(tp);
12661
12662         } else {
12663                 tg3_flag_clear(tp, NVRAM);
12664                 tg3_flag_clear(tp, NVRAM_BUFFERED);
12665
12666                 tg3_get_eeprom_size(tp);
12667         }
12668 }
12669
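/* Write @len bytes from @buf to the legacy SEEPROM one 32-bit word at a
 * time, polling EEPROM_ADDR_COMPLETE for up to ~1 second per word before
 * giving up with -EBUSY.
 */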
12670 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
12671                                     u32 offset, u32 len, u8 *buf)
12672 {
12673         int i, j, rc = 0;
12674         u32 val;
12675
12676         for (i = 0; i < len; i += 4) {
12677                 u32 addr;
12678                 __be32 data;
12679
12680                 addr = offset + i;
12681
12682                 memcpy(&data, buf + i, 4);
12683
12684                 /*
12685                  * The SEEPROM interface expects the data to be in the opposite
12686                  * of the native endian format.  We accomplish this by reversing
12687                  * all the operations that would have been performed on the
12688                  * data from a call to tg3_nvram_read_be32().
12689                  */
12690                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
12691
12692                 val = tr32(GRC_EEPROM_ADDR);
12693                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
12694
12695                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
12696                         EEPROM_ADDR_READ);
12697                 tw32(GRC_EEPROM_ADDR, val |
12698                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
12699                         (addr & EEPROM_ADDR_ADDR_MASK) |
12700                         EEPROM_ADDR_START |
12701                         EEPROM_ADDR_WRITE);
12702
12703                 for (j = 0; j < 1000; j++) {
12704                         val = tr32(GRC_EEPROM_ADDR);
12705
12706                         if (val & EEPROM_ADDR_COMPLETE)
12707                                 break;
12708                         msleep(1);
12709                 }
12710                 if (!(val & EEPROM_ADDR_COMPLETE)) {
12711                         rc = -EBUSY;
12712                         break;
12713                 }
12714         }
12715
12716         return rc;
12717 }
12718
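/* Unbuffered flash parts can only be programmed a page at a time: read
 * the enclosing page into a bounce buffer, merge in the new data, issue
 * a write-enable, erase the page, then stream it back word by word with
 * FIRST/LAST framing on the page boundaries.
 */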
12719 /* offset and length are dword aligned */
12720 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12721                 u8 *buf)
12722 {
12723         int ret = 0;
12724         u32 pagesize = tp->nvram_pagesize;
12725         u32 pagemask = pagesize - 1;
12726         u32 nvram_cmd;
12727         u8 *tmp;
12728
12729         tmp = kmalloc(pagesize, GFP_KERNEL);
12730         if (tmp == NULL)
12731                 return -ENOMEM;
12732
12733         while (len) {
12734                 int j;
12735                 u32 phy_addr, page_off, size;
12736
12737                 phy_addr = offset & ~pagemask;
12738
12739                 for (j = 0; j < pagesize; j += 4) {
12740                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12741                                                   (__be32 *) (tmp + j));
12742                         if (ret)
12743                                 break;
12744                 }
12745                 if (ret)
12746                         break;
12747
12748                 page_off = offset & pagemask;
12749                 size = pagesize;
12750                 if (len < size)
12751                         size = len;
12752
12753                 len -= size;
12754
12755                 memcpy(tmp + page_off, buf, size);
12756
12757                 offset = offset + (pagesize - page_off);
12758
12759                 tg3_enable_nvram_access(tp);
12760
12761                 /*
12762                  * Before we can erase the flash page, we need
12763                  * to issue a special "write enable" command.
12764                  */
12765                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12766
12767                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12768                         break;
12769
12770                 /* Erase the target page */
12771                 tw32(NVRAM_ADDR, phy_addr);
12772
12773                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12774                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12775
12776                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12777                         break;
12778
12779                 /* Issue another write enable to start the write. */
12780                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12781
12782                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12783                         break;
12784
12785                 for (j = 0; j < pagesize; j += 4) {
12786                         __be32 data;
12787
12788                         data = *((__be32 *) (tmp + j));
12789
12790                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12791
12792                         tw32(NVRAM_ADDR, phy_addr + j);
12793
12794                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12795                                 NVRAM_CMD_WR;
12796
12797                         if (j == 0)
12798                                 nvram_cmd |= NVRAM_CMD_FIRST;
12799                         else if (j == (pagesize - 4))
12800                                 nvram_cmd |= NVRAM_CMD_LAST;
12801
12802                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12803                                 break;
12804                 }
12805                 if (ret)
12806                         break;
12807         }
12808
12809         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12810         tg3_nvram_exec_cmd(tp, nvram_cmd);
12811
12812         kfree(tmp);
12813
12814         return ret;
12815 }
12816
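/* Buffered flash and EEPROM parts accept word-at-a-time writes; the
 * controller handles page buffering internally, so this path only frames
 * page (and transfer) boundaries with FIRST/LAST and issues the extra
 * write-enable that ST parts on older (non-5752, pre-5755) chips require.
 */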
12817 /* offset and length are dword aligned */
12818 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12819                 u8 *buf)
12820 {
12821         int i, ret = 0;
12822
12823         for (i = 0; i < len; i += 4, offset += 4) {
12824                 u32 page_off, phy_addr, nvram_cmd;
12825                 __be32 data;
12826
12827                 memcpy(&data, buf + i, 4);
12828                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12829
12830                 page_off = offset % tp->nvram_pagesize;
12831
12832                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12833
12834                 tw32(NVRAM_ADDR, phy_addr);
12835
12836                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12837
12838                 if (page_off == 0 || i == 0)
12839                         nvram_cmd |= NVRAM_CMD_FIRST;
12840                 if (page_off == (tp->nvram_pagesize - 4))
12841                         nvram_cmd |= NVRAM_CMD_LAST;
12842
12843                 if (i == (len - 4))
12844                         nvram_cmd |= NVRAM_CMD_LAST;
12845
12846                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12847                     !tg3_flag(tp, 5755_PLUS) &&
12848                     (tp->nvram_jedecnum == JEDEC_ST) &&
12849                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12850
12851                         if ((ret = tg3_nvram_exec_cmd(tp,
12852                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12853                                 NVRAM_CMD_DONE)))
12854
12855                                 break;
12856                 }
12857                 if (!tg3_flag(tp, FLASH)) {
12858                         /* We always do complete word writes to eeprom. */
12859                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12860                 }
12861
12862                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12863                         break;
12864         }
12865         return ret;
12866 }
12867
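/* Top-level NVRAM write: de-assert the eeprom write-protect GPIO around
 * the operation when needed, then dispatch to the SEEPROM, buffered, or
 * unbuffered implementation according to the flags probed at init time.
 */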
12868 /* offset and length are dword aligned */
12869 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12870 {
12871         int ret;
12872
12873         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12874                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12875                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12876                 udelay(40);
12877         }
12878
12879         if (!tg3_flag(tp, NVRAM)) {
12880                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12881         } else {
12882                 u32 grc_mode;
12883
12884                 ret = tg3_nvram_lock(tp);
12885                 if (ret)
12886                         return ret;
12887
12888                 tg3_enable_nvram_access(tp);
12889                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12890                         tw32(NVRAM_WRITE1, 0x406);
12891
12892                 grc_mode = tr32(GRC_MODE);
12893                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12894
12895                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12896                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12897                                 buf);
12898                 } else {
12899                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12900                                 buf);
12901                 }
12902
12903                 grc_mode = tr32(GRC_MODE);
12904                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12905
12906                 tg3_disable_nvram_access(tp);
12907                 tg3_nvram_unlock(tp);
12908         }
12909
12910         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12911                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12912                 udelay(40);
12913         }
12914
12915         return ret;
12916 }
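/* Illustrative caller sketch only -- not a call site in this file (the
 * driver's ethtool set_eeprom path is the usual entry point):
 *
 *      u8 data[4] = { 0xde, 0xad, 0xbe, 0xef };
 *      int err = tg3_nvram_write_block(tp, 0x100, sizeof(data), data);
 *
 * Both offset and length must be dword aligned, as noted above.
 */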
12917
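/* Static PCI subsystem ID -> PHY ID map, used when a board has no usable
 * eeprom signature.  A phy_id of 0 marks a SerDes (fiber) board.
 */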
12918 struct subsys_tbl_ent {
12919         u16 subsys_vendor, subsys_devid;
12920         u32 phy_id;
12921 };
12922
12923 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
12924         /* Broadcom boards. */
12925         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12926           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
12927         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12928           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
12929         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12930           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
12931         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12932           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
12933         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12934           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
12935         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12936           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
12937         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12938           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
12939         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12940           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
12941         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12942           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
12943         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12944           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
12945         { TG3PCI_SUBVENDOR_ID_BROADCOM,
12946           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
12947
12948         /* 3com boards. */
12949         { TG3PCI_SUBVENDOR_ID_3COM,
12950           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
12951         { TG3PCI_SUBVENDOR_ID_3COM,
12952           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
12953         { TG3PCI_SUBVENDOR_ID_3COM,
12954           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
12955         { TG3PCI_SUBVENDOR_ID_3COM,
12956           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
12957         { TG3PCI_SUBVENDOR_ID_3COM,
12958           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
12959
12960         /* DELL boards. */
12961         { TG3PCI_SUBVENDOR_ID_DELL,
12962           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
12963         { TG3PCI_SUBVENDOR_ID_DELL,
12964           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
12965         { TG3PCI_SUBVENDOR_ID_DELL,
12966           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
12967         { TG3PCI_SUBVENDOR_ID_DELL,
12968           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
12969
12970         /* Compaq boards. */
12971         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12972           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
12973         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12974           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
12975         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12976           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
12977         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12978           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
12979         { TG3PCI_SUBVENDOR_ID_COMPAQ,
12980           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12981
12982         /* IBM boards. */
12983         { TG3PCI_SUBVENDOR_ID_IBM,
12984           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12985 };
12986
12987 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12988 {
12989         int i;
12990
12991         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12992                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12993                      tp->pdev->subsystem_vendor) &&
12994                     (subsys_id_to_phy_id[i].subsys_devid ==
12995                      tp->pdev->subsystem_device))
12996                         return &subsys_id_to_phy_id[i];
12997         }
12998         return NULL;
12999 }
13000
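/* Pull the power-on configuration the bootcode left in NIC SRAM: PHY ID,
 * LED mode, WOL/ASF/APE enables and assorted workaround flags.  Falls
 * back to conservative defaults when the SRAM signature is absent.
 */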
13001 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13002 {
13003         u32 val;
13004
13005         tp->phy_id = TG3_PHY_ID_INVALID;
13006         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13007
13008         /* Assume the device is onboard and WOL-capable by default. */
13009         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13010         tg3_flag_set(tp, WOL_CAP);
13011
13012         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13013                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13014                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13015                         tg3_flag_set(tp, IS_NIC);
13016                 }
13017                 val = tr32(VCPU_CFGSHDW);
13018                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13019                         tg3_flag_set(tp, ASPM_WORKAROUND);
13020                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13021                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13022                         tg3_flag_set(tp, WOL_ENABLE);
13023                         device_set_wakeup_enable(&tp->pdev->dev, true);
13024                 }
13025                 goto done;
13026         }
13027
13028         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13029         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13030                 u32 nic_cfg, led_cfg;
13031                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13032                 int eeprom_phy_serdes = 0;
13033
13034                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13035                 tp->nic_sram_data_cfg = nic_cfg;
13036
13037                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13038                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13039                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13040                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13041                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13042                     (ver > 0) && (ver < 0x100))
13043                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13044
13045                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13046                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13047
13048                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13049                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13050                         eeprom_phy_serdes = 1;
13051
13052                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13053                 if (nic_phy_id != 0) {
13054                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13055                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13056
13057                         eeprom_phy_id  = (id1 >> 16) << 10;
13058                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13059                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13060                 } else
13061                         eeprom_phy_id = 0;
13062
13063                 tp->phy_id = eeprom_phy_id;
13064                 if (eeprom_phy_serdes) {
13065                         if (!tg3_flag(tp, 5705_PLUS))
13066                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13067                         else
13068                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13069                 }
13070
13071                 if (tg3_flag(tp, 5750_PLUS))
13072                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13073                                     SHASTA_EXT_LED_MODE_MASK);
13074                 else
13075                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13076
13077                 switch (led_cfg) {
13078                 default:
13079                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13080                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13081                         break;
13082
13083                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13084                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13085                         break;
13086
13087                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13088                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13089
13090                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13091                          * read from some older 5700/5701 bootcode.
13092                          */
13093                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13094                             ASIC_REV_5700 ||
13095                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13096                             ASIC_REV_5701)
13097                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13098
13099                         break;
13100
13101                 case SHASTA_EXT_LED_SHARED:
13102                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13103                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13104                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13105                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13106                                                  LED_CTRL_MODE_PHY_2);
13107                         break;
13108
13109                 case SHASTA_EXT_LED_MAC:
13110                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13111                         break;
13112
13113                 case SHASTA_EXT_LED_COMBO:
13114                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13115                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13116                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13117                                                  LED_CTRL_MODE_PHY_2);
13118                         break;
13119
13120                 }
13121
13122                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13123                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13124                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13125                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13126
13127                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13128                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13129
13130                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13131                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13132                         if ((tp->pdev->subsystem_vendor ==
13133                              PCI_VENDOR_ID_ARIMA) &&
13134                             (tp->pdev->subsystem_device == 0x205a ||
13135                              tp->pdev->subsystem_device == 0x2063))
13136                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13137                 } else {
13138                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13139                         tg3_flag_set(tp, IS_NIC);
13140                 }
13141
13142                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13143                         tg3_flag_set(tp, ENABLE_ASF);
13144                         if (tg3_flag(tp, 5750_PLUS))
13145                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13146                 }
13147
13148                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13149                     tg3_flag(tp, 5750_PLUS))
13150                         tg3_flag_set(tp, ENABLE_APE);
13151
13152                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13153                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13154                         tg3_flag_clear(tp, WOL_CAP);
13155
13156                 if (tg3_flag(tp, WOL_CAP) &&
13157                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13158                         tg3_flag_set(tp, WOL_ENABLE);
13159                         device_set_wakeup_enable(&tp->pdev->dev, true);
13160                 }
13161
13162                 if (cfg2 & (1 << 17))
13163                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13164
13165                 /* SerDes signal pre-emphasis in register 0x590 is set
13166                  * by the bootcode if bit 18 is set. */
13167                 if (cfg2 & (1 << 18))
13168                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13169
13170                 if ((tg3_flag(tp, 57765_PLUS) ||
13171                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13172                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13173                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13174                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13175
13176                 if (tg3_flag(tp, PCI_EXPRESS) &&
13177                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13178                     !tg3_flag(tp, 57765_PLUS)) {
13179                         u32 cfg3;
13180
13181                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13182                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13183                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13184                 }
13185
13186                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13187                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13188                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13189                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13190                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13191                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13192         }
13193 done:
13194         if (tg3_flag(tp, WOL_CAP))
13195                 device_set_wakeup_enable(&tp->pdev->dev,
13196                                          tg3_flag(tp, WOL_ENABLE));
13197         else
13198                 device_set_wakeup_capable(&tp->pdev->dev, false);
13199 }
13200
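/* Start an OTP controller command and poll OTP_STATUS for completion;
 * returns -EBUSY if the command does not finish within roughly 1 ms.
 */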
13201 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13202 {
13203         int i;
13204         u32 val;
13205
13206         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13207         tw32(OTP_CTRL, cmd);
13208
13209         /* Wait for up to 1 ms for command to execute. */
13210         for (i = 0; i < 100; i++) {
13211                 val = tr32(OTP_STATUS);
13212                 if (val & OTP_STATUS_CMD_DONE)
13213                         break;
13214                 udelay(10);
13215         }
13216
13217         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13218 }
13219
13220 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13221  * configuration is a 32-bit value that straddles the alignment boundary.
13222  * We do two 32-bit reads and then shift and merge the results.
13223  */
13224 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13225 {
13226         u32 bhalf_otp, thalf_otp;
13227
13228         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13229
13230         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13231                 return 0;
13232
13233         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13234
13235         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13236                 return 0;
13237
13238         thalf_otp = tr32(OTP_READ_DATA);
13239
13240         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13241
13242         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13243                 return 0;
13244
13245         bhalf_otp = tr32(OTP_READ_DATA);
13246
13247         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13248 }
13249
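/* Seed link_config with everything the PHY could advertise: autoneg plus
 * pause, gigabit unless the PHY is 10/100-only, and either the copper or
 * the fibre media set depending on the SerDes flags.
 */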
13250 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13251 {
13252         u32 adv = ADVERTISED_Autoneg |
13253                   ADVERTISED_Pause;
13254
13255         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13256                 adv |= ADVERTISED_1000baseT_Half |
13257                        ADVERTISED_1000baseT_Full;
13258
13259         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13260                 adv |= ADVERTISED_100baseT_Half |
13261                        ADVERTISED_100baseT_Full |
13262                        ADVERTISED_10baseT_Half |
13263                        ADVERTISED_10baseT_Full |
13264                        ADVERTISED_TP;
13265         else
13266                 adv |= ADVERTISED_FIBRE;
13267
13268         tp->link_config.advertising = adv;
13269         tp->link_config.speed = SPEED_INVALID;
13270         tp->link_config.duplex = DUPLEX_INVALID;
13271         tp->link_config.autoneg = AUTONEG_ENABLE;
13272         tp->link_config.active_speed = SPEED_INVALID;
13273         tp->link_config.active_duplex = DUPLEX_INVALID;
13274         tp->link_config.orig_speed = SPEED_INVALID;
13275         tp->link_config.orig_duplex = DUPLEX_INVALID;
13276         tp->link_config.orig_autoneg = AUTONEG_INVALID;
13277 }
13278
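/* Identify the PHY: the hardware ID registers first (unless ASF/APE
 * firmware owns the PHY), then any ID recorded in the eeprom, then the
 * hard-coded subsystem table; finally establish the initial autoneg
 * advertisement.
 */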
13279 static int __devinit tg3_phy_probe(struct tg3 *tp)
13280 {
13281         u32 hw_phy_id_1, hw_phy_id_2;
13282         u32 hw_phy_id, hw_phy_id_masked;
13283         int err;
13284
13285         /* flow control autonegotiation is default behavior */
13286         tg3_flag_set(tp, PAUSE_AUTONEG);
13287         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13288
13289         if (tg3_flag(tp, USE_PHYLIB))
13290                 return tg3_phy_init(tp);
13291
13292         /* Reading the PHY ID register can conflict with ASF
13293          * firmware access to the PHY hardware.
13294          */
13295         err = 0;
13296         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13297                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13298         } else {
13299                 /* Now read the physical PHY_ID from the chip and verify
13300                  * that it is sane.  If it doesn't look good, we fall back
13301                  * to the PHY ID recorded in the eeprom area and, failing
13302                  * that, the hard-coded subsystem ID table.
13303                  */
13304                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13305                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13306
13307                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13308                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13309                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13310
13311                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13312         }
13313
13314         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13315                 tp->phy_id = hw_phy_id;
13316                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13317                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13318                 else
13319                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13320         } else {
13321                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13322                         /* Do nothing, phy ID already set up in
13323                          * tg3_get_eeprom_hw_cfg().
13324                          */
13325                 } else {
13326                         struct subsys_tbl_ent *p;
13327
13328                         /* No eeprom signature?  Try the hardcoded
13329                          * subsys device table.
13330                          */
13331                         p = tg3_lookup_by_subsys(tp);
13332                         if (!p)
13333                                 return -ENODEV;
13334
13335                         tp->phy_id = p->phy_id;
13336                         if (!tp->phy_id ||
13337                             tp->phy_id == TG3_PHY_ID_BCM8002)
13338                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13339                 }
13340         }
13341
13342         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13343             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13344              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13345              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13346               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13347              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13348               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13349                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13350
13351         tg3_phy_init_link_config(tp);
13352
13353         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13354             !tg3_flag(tp, ENABLE_APE) &&
13355             !tg3_flag(tp, ENABLE_ASF)) {
13356                 u32 bmsr, mask;
13357
13358                 tg3_readphy(tp, MII_BMSR, &bmsr);
13359                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13360                     (bmsr & BMSR_LSTATUS))
13361                         goto skip_phy_reset;
13362
13363                 err = tg3_phy_reset(tp);
13364                 if (err)
13365                         return err;
13366
13367                 tg3_phy_set_wirespeed(tp);
13368
13369                 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13370                         ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13371                         ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
13372                 if (!tg3_copper_is_advertising_all(tp, mask)) {
13373                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13374                                             tp->link_config.flowctrl);
13375
13376                         tg3_writephy(tp, MII_BMCR,
13377                                      BMCR_ANENABLE | BMCR_ANRESTART);
13378                 }
13379         }
13380
13381 skip_phy_reset:
13382         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13383                 err = tg3_init_5401phy_dsp(tp);
13384                 if (err)
13385                         return err;
13386
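                /* Deliberately run the 5401 DSP setup a second time; a
                 * single pass is evidently not always sufficient on
                 * these PHYs.
                 */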
13387                 err = tg3_init_5401phy_dsp(tp);
13388         }
13389
13390         return err;
13391 }
13392
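/* Parse the VPD read-only section for the board part number and, on
 * boards whose manufacturer ID keyword reads "1028" (Dell), a vendor
 * firmware string.  Every field length is checked against the block and
 * buffer ends so a corrupt VPD image cannot overflow fw_ver or
 * board_part_number.
 */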
13393 static void __devinit tg3_read_vpd(struct tg3 *tp)
13394 {
13395         u8 *vpd_data;
13396         unsigned int block_end, rosize, len;
13397         u32 vpdlen;
13398         int j, i = 0;
13399
13400         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13401         if (!vpd_data)
13402                 goto out_no_vpd;
13403
13404         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13405         if (i < 0)
13406                 goto out_not_found;
13407
13408         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13409         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13410         i += PCI_VPD_LRDT_TAG_SIZE;
13411
13412         if (block_end > vpdlen)
13413                 goto out_not_found;
13414
13415         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13416                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13417         if (j > 0) {
13418                 len = pci_vpd_info_field_size(&vpd_data[j]);
13419
13420                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13421                 if (j + len > block_end || len != 4 ||
13422                     memcmp(&vpd_data[j], "1028", 4))
13423                         goto partno;
13424
13425                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13426                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13427                 if (j < 0)
13428                         goto partno;
13429
13430                 len = pci_vpd_info_field_size(&vpd_data[j]);
13431
13432                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13433                 if (j + len > block_end)
13434                         goto partno;
13435
13436                 if (len >= sizeof(tp->fw_ver))
13437                         len = sizeof(tp->fw_ver) - 1;
13438                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
13439                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
13440                          &vpd_data[j]);
13441         }
13442
13443 partno:
13444         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13445                                       PCI_VPD_RO_KEYWORD_PARTNO);
13446         if (i < 0)
13447                 goto out_not_found;
13448
13449         len = pci_vpd_info_field_size(&vpd_data[i]);
13450
13451         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13452         if (len > TG3_BPN_SIZE ||
13453             (len + i) > vpdlen)
13454                 goto out_not_found;
13455
13456         memcpy(tp->board_part_number, &vpd_data[i], len);
13457
13458 out_not_found:
13459         kfree(vpd_data);
13460         if (tp->board_part_number[0])
13461                 return;
13462
13463 out_no_vpd:
13464         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13465                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13466                         strcpy(tp->board_part_number, "BCM5717");
13467                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13468                         strcpy(tp->board_part_number, "BCM5718");
13469                 else
13470                         goto nomatch;
13471         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13472                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13473                         strcpy(tp->board_part_number, "BCM57780");
13474                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13475                         strcpy(tp->board_part_number, "BCM57760");
13476                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13477                         strcpy(tp->board_part_number, "BCM57790");
13478                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13479                         strcpy(tp->board_part_number, "BCM57788");
13480                 else
13481                         goto nomatch;
13482         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13483                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13484                         strcpy(tp->board_part_number, "BCM57761");
13485                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13486                         strcpy(tp->board_part_number, "BCM57765");
13487                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13488                         strcpy(tp->board_part_number, "BCM57781");
13489                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13490                         strcpy(tp->board_part_number, "BCM57785");
13491                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13492                         strcpy(tp->board_part_number, "BCM57791");
13493                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13494                         strcpy(tp->board_part_number, "BCM57795");
13495                 else
13496                         goto nomatch;
13497         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13498                 strcpy(tp->board_part_number, "BCM95906");
13499         } else {
13500 nomatch:
13501                 strcpy(tp->board_part_number, "none");
13502         }
13503 }
13504
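/* A firmware image is treated as valid when its first word matches the
 * 0x0c000000 signature under the 0xfc000000 mask and its second word is
 * zero.
 */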
13505 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13506 {
13507         u32 val;
13508
13509         if (tg3_nvram_read(tp, offset, &val) ||
13510             (val & 0xfc000000) != 0x0c000000 ||
13511             tg3_nvram_read(tp, offset + 4, &val) ||
13512             val != 0)
13513                 return 0;
13514
13515         return 1;
13516 }
13517
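/* Extract the bootcode version: newer images embed a 16-byte version
 * string at an offset stored in the image header, while older ones only
 * provide a major.minor pair in the NVRAM pointer area.
 */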
13518 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13519 {
13520         u32 val, offset, start, ver_offset;
13521         int i, dst_off;
13522         bool newver = false;
13523
13524         if (tg3_nvram_read(tp, 0xc, &offset) ||
13525             tg3_nvram_read(tp, 0x4, &start))
13526                 return;
13527
13528         offset = tg3_nvram_logical_addr(tp, offset);
13529
13530         if (tg3_nvram_read(tp, offset, &val))
13531                 return;
13532
13533         if ((val & 0xfc000000) == 0x0c000000) {
13534                 if (tg3_nvram_read(tp, offset + 4, &val))
13535                         return;
13536
13537                 if (val == 0)
13538                         newver = true;
13539         }
13540
13541         dst_off = strlen(tp->fw_ver);
13542
13543         if (newver) {
13544                 if (TG3_VER_SIZE - dst_off < 16 ||
13545                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13546                         return;
13547
13548                 offset = offset + ver_offset - start;
13549                 for (i = 0; i < 16; i += 4) {
13550                         __be32 v;
13551                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13552                                 return;
13553
13554                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13555                 }
13556         } else {
13557                 u32 major, minor;
13558
13559                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13560                         return;
13561
13562                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13563                         TG3_NVM_BCVER_MAJSFT;
13564                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13565                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13566                          "v%d.%02d", major, minor);
13567         }
13568 }
13569
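/* Hardware self-boot images keep a packed major.minor version in the
 * CFG1 word; render it as "sb vM.mm".
 */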
13570 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13571 {
13572         u32 val, major, minor;
13573
13574         /* Use native endian representation */
13575         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13576                 return;
13577
13578         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13579                 TG3_NVM_HWSB_CFG1_MAJSFT;
13580         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13581                 TG3_NVM_HWSB_CFG1_MINSFT;
13582
13583         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13584 }
13585
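/* Self-boot (eeprom) images: the location of the version word depends on
 * the format revision, and builds greater than zero get a letter suffix
 * ('a' + build - 1).
 */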
13586 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13587 {
13588         u32 offset, major, minor, build;
13589
13590         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13591
13592         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13593                 return;
13594
13595         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13596         case TG3_EEPROM_SB_REVISION_0:
13597                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13598                 break;
13599         case TG3_EEPROM_SB_REVISION_2:
13600                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13601                 break;
13602         case TG3_EEPROM_SB_REVISION_3:
13603                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13604                 break;
13605         case TG3_EEPROM_SB_REVISION_4:
13606                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13607                 break;
13608         case TG3_EEPROM_SB_REVISION_5:
13609                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13610                 break;
13611         case TG3_EEPROM_SB_REVISION_6:
13612                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13613                 break;
13614         default:
13615                 return;
13616         }
13617
13618         if (tg3_nvram_read(tp, offset, &val))
13619                 return;
13620
13621         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13622                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13623         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13624                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13625         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13626
13627         if (minor > 99 || build > 26)
13628                 return;
13629
13630         offset = strlen(tp->fw_ver);
13631         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13632                  " v%d.%02d", major, minor);
13633
13634         if (build > 0) {
13635                 offset = strlen(tp->fw_ver);
13636                 if (offset < TG3_VER_SIZE - 1)
13637                         tp->fw_ver[offset] = 'a' + build - 1;
13638         }
13639 }
13640
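/* Walk the NVRAM directory for the ASF initialization entry and append
 * the 16-byte management firmware version string from its image.
 */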
13641 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13642 {
13643         u32 val, offset, start;
13644         int i, vlen;
13645
13646         for (offset = TG3_NVM_DIR_START;
13647              offset < TG3_NVM_DIR_END;
13648              offset += TG3_NVM_DIRENT_SIZE) {
13649                 if (tg3_nvram_read(tp, offset, &val))
13650                         return;
13651
13652                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13653                         break;
13654         }
13655
13656         if (offset == TG3_NVM_DIR_END)
13657                 return;
13658
13659         if (!tg3_flag(tp, 5705_PLUS))
13660                 start = 0x08000000;
13661         else if (tg3_nvram_read(tp, offset - 4, &start))
13662                 return;
13663
13664         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13665             !tg3_fw_img_is_valid(tp, offset) ||
13666             tg3_nvram_read(tp, offset + 8, &val))
13667                 return;
13668
13669         offset += val - start;
13670
13671         vlen = strlen(tp->fw_ver);
13672
13673         tp->fw_ver[vlen++] = ',';
13674         tp->fw_ver[vlen++] = ' ';
13675
13676         for (i = 0; i < 4; i++) {
13677                 __be32 v;
13678                 if (tg3_nvram_read_be32(tp, offset, &v))
13679                         return;
13680
13681                 offset += sizeof(v);
13682
13683                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13684                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13685                         break;
13686                 }
13687
13688                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13689                 vlen += sizeof(v);
13690         }
13691 }
13692
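/* Query the APE for its firmware version, tagged "NCSI" or "DASH"
 * according to the advertised feature bits; bail out quietly if the APE
 * segment signature or the firmware ready bit is missing.
 */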
13693 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13694 {
13695         int vlen;
13696         u32 apedata;
13697         char *fwtype;
13698
13699         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13700                 return;
13701
13702         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13703         if (apedata != APE_SEG_SIG_MAGIC)
13704                 return;
13705
13706         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13707         if (!(apedata & APE_FW_STATUS_READY))
13708                 return;
13709
13710         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13711
13712         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13713                 tg3_flag_set(tp, APE_HAS_NCSI);
13714                 fwtype = "NCSI";
13715         } else {
13716                 fwtype = "DASH";
13717         }
13718
13719         vlen = strlen(tp->fw_ver);
13720
13721         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13722                  fwtype,
13723                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13724                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13725                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13726                  (apedata & APE_FW_VERSION_BLDMSK));
13727 }
13728
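/* Assemble the complete fw_ver string: the VPD version if one was
 * already read, then the bootcode or self-boot version from NVRAM, then
 * any ASF or APE management firmware version.
 */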
13729 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13730 {
13731         u32 val;
13732         bool vpd_vers = false;
13733
13734         if (tp->fw_ver[0] != 0)
13735                 vpd_vers = true;
13736
13737         if (tg3_flag(tp, NO_NVRAM)) {
13738                 strcat(tp->fw_ver, "sb");
13739                 return;
13740         }
13741
13742         if (tg3_nvram_read(tp, 0, &val))
13743                 return;
13744
13745         if (val == TG3_EEPROM_MAGIC)
13746                 tg3_read_bc_ver(tp);
13747         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13748                 tg3_read_sb_ver(tp, val);
13749         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13750                 tg3_read_hwsb_ver(tp);
13751         else
13752                 return;
13753
13754         if (vpd_vers)
13755                 goto done;
13756
13757         if (tg3_flag(tp, ENABLE_APE)) {
13758                 if (tg3_flag(tp, ENABLE_ASF))
13759                         tg3_read_dash_ver(tp);
13760         } else if (tg3_flag(tp, ENABLE_ASF)) {
13761                 tg3_read_mgmtfw_ver(tp);
13762         }
13763
13764 done:
13765         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13766 }
13767
13768 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13769
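/* Rx return ring size depends on the chip family's ring capabilities. */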
13770 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13771 {
13772         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13773                 return TG3_RX_RET_MAX_SIZE_5717;
13774         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13775                 return TG3_RX_RET_MAX_SIZE_5700;
13776         else
13777                 return TG3_RX_RET_MAX_SIZE_5705;
13778 }
13779
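/* Host bridges known to reorder posted memory writes; when one of these
 * is present (and the device is not PCI Express) the driver sets
 * MBOX_WRITE_REORDER and reads mailbox registers back after writing.
 */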
13780 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13781         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13782         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13783         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13784         { },
13785 };
13786
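/* One-time probe of chip revision, bus type and the long tail of
 * per-chip workaround flags.  Starts by forcing MWI off and making sure
 * register accesses are byteswapped and, where required, indirect.
 */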
13787 static int __devinit tg3_get_invariants(struct tg3 *tp)
13788 {
13789         u32 misc_ctrl_reg;
13790         u32 pci_state_reg, grc_misc_cfg;
13791         u32 val;
13792         u16 pci_cmd;
13793         int err;
13794
13795         /* Force memory write invalidate off.  If we leave it on,
13796          * then on 5700_BX chips we have to enable a workaround.
13797          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13798          * to match the cacheline size.  The Broadcom driver have this
13799          * workaround but turns MWI off all the times so never uses
13800          * it.  This seems to suggest that the workaround is insufficient.
13801          */
13802         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13803         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13804         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13805
13806         /* Important! -- Make sure register accesses are byteswapped
13807          * correctly.  Also, for those chips that require it, make
13808          * sure that indirect register accesses are enabled before
13809          * the first operation.
13810          */
13811         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13812                               &misc_ctrl_reg);
13813         tp->misc_host_ctrl |= (misc_ctrl_reg &
13814                                MISC_HOST_CTRL_CHIPREV);
13815         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13816                                tp->misc_host_ctrl);
13817
13818         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13819                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13820         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13821                 u32 prod_id_asic_rev;
13822
13823                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13824                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13825                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13826                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13827                         pci_read_config_dword(tp->pdev,
13828                                               TG3PCI_GEN2_PRODID_ASICREV,
13829                                               &prod_id_asic_rev);
13830                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13831                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13832                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13833                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13834                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13835                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13836                         pci_read_config_dword(tp->pdev,
13837                                               TG3PCI_GEN15_PRODID_ASICREV,
13838                                               &prod_id_asic_rev);
13839                 else
13840                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13841                                               &prod_id_asic_rev);
13842
13843                 tp->pci_chip_rev_id = prod_id_asic_rev;
13844         }
13845
13846         /* Wrong chip ID in 5752 A0. This code can be removed later
13847          * as A0 is not in production.
13848          */
13849         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13850                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13851
13852         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13853          * we need to disable memory and use config. cycles
13854          * only to access all registers. The 5702/03 chips
13855          * can mistakenly decode the special cycles from the
13856          * ICH chipsets as memory write cycles, causing corruption
13857          * of register and memory space. Only certain ICH bridges
13858          * will drive special cycles with non-zero data during the
13859          * address phase which can fall within the 5703's address
13860          * range. This is not an ICH bug as the PCI spec allows
13861          * non-zero address during special cycles. However, only
13862          * these ICH bridges are known to drive non-zero addresses
13863          * during special cycles.
13864          *
13865          * Since special cycles do not cross PCI bridges, we only
13866          * enable this workaround if the 5703 is on the secondary
13867          * bus of these ICH bridges.
13868          */
13869         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13870             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13871                 static struct tg3_dev_id {
13872                         u32     vendor;
13873                         u32     device;
13874                         u32     rev;
13875                 } ich_chipsets[] = {
13876                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13877                           PCI_ANY_ID },
13878                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13879                           PCI_ANY_ID },
13880                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13881                           0xa },
13882                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13883                           PCI_ANY_ID },
13884                         { },
13885                 };
13886                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13887                 struct pci_dev *bridge = NULL;
13888
13889                 while (pci_id->vendor != 0) {
13890                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13891                                                 bridge);
13892                         if (!bridge) {
13893                                 pci_id++;
13894                                 continue;
13895                         }
13896                         if (pci_id->rev != PCI_ANY_ID) {
13897                                 if (bridge->revision > pci_id->rev)
13898                                         continue;
13899                         }
13900                         if (bridge->subordinate &&
13901                             (bridge->subordinate->number ==
13902                              tp->pdev->bus->number)) {
13903                                 tg3_flag_set(tp, ICH_WORKAROUND);
13904                                 pci_dev_put(bridge);
13905                                 break;
13906                         }
13907                 }
13908         }
13909
13910         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13911                 static struct tg3_dev_id {
13912                         u32     vendor;
13913                         u32     device;
13914                 } bridge_chipsets[] = {
13915                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13916                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13917                         { },
13918                 };
13919                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13920                 struct pci_dev *bridge = NULL;
13921
13922                 while (pci_id->vendor != 0) {
13923                         bridge = pci_get_device(pci_id->vendor,
13924                                                 pci_id->device,
13925                                                 bridge);
13926                         if (!bridge) {
13927                                 pci_id++;
13928                                 continue;
13929                         }
13930                         if (bridge->subordinate &&
13931                             (bridge->subordinate->number <=
13932                              tp->pdev->bus->number) &&
13933                             (bridge->subordinate->subordinate >=
13934                              tp->pdev->bus->number)) {
13935                                 tg3_flag_set(tp, 5701_DMA_BUG);
13936                                 pci_dev_put(bridge);
13937                                 break;
13938                         }
13939                 }
13940         }
13941
13942         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13943          * DMA addresses > 40-bit. This bridge may have other additional
13944          * 57xx devices behind it in some 4-port NIC designs for example.
13945          * Any tg3 device found behind the bridge will also need the 40-bit
13946          * DMA workaround.
13947          */
13948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13950                 tg3_flag_set(tp, 5780_CLASS);
13951                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13952                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13953         } else {
13954                 struct pci_dev *bridge = NULL;
13955
13956                 do {
13957                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13958                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13959                                                 bridge);
13960                         if (bridge && bridge->subordinate &&
13961                             (bridge->subordinate->number <=
13962                              tp->pdev->bus->number) &&
13963                             (bridge->subordinate->subordinate >=
13964                              tp->pdev->bus->number)) {
13965                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13966                                 pci_dev_put(bridge);
13967                                 break;
13968                         }
13969                 } while (bridge);
13970         }
13971
13972         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13973             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13974                 tp->pdev_peer = tg3_find_peer(tp);
13975
13976         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13977             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13978             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13979                 tg3_flag_set(tp, 5717_PLUS);
13980
13981         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13982             tg3_flag(tp, 5717_PLUS))
13983                 tg3_flag_set(tp, 57765_PLUS);
13984
13985         /* Intentionally exclude ASIC_REV_5906 */
13986         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13987             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13988             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13989             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13990             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13991             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13992             tg3_flag(tp, 57765_PLUS))
13993                 tg3_flag_set(tp, 5755_PLUS);
13994
13995         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13996             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13997             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13998             tg3_flag(tp, 5755_PLUS) ||
13999             tg3_flag(tp, 5780_CLASS))
14000                 tg3_flag_set(tp, 5750_PLUS);
14001
14002         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14003             tg3_flag(tp, 5750_PLUS))
14004                 tg3_flag_set(tp, 5705_PLUS);
14005
14006         /* Determine TSO capabilities */
14007         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14008                 ; /* Do nothing. HW bug. */
14009         else if (tg3_flag(tp, 57765_PLUS))
14010                 tg3_flag_set(tp, HW_TSO_3);
14011         else if (tg3_flag(tp, 5755_PLUS) ||
14012                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14013                 tg3_flag_set(tp, HW_TSO_2);
14014         else if (tg3_flag(tp, 5750_PLUS)) {
14015                 tg3_flag_set(tp, HW_TSO_1);
14016                 tg3_flag_set(tp, TSO_BUG);
14017                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14018                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14019                         tg3_flag_clear(tp, TSO_BUG);
14020         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14021                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14022                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14023                         tg3_flag_set(tp, TSO_BUG);
14024                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14025                         tp->fw_needed = FIRMWARE_TG3TSO5;
14026                 else
14027                         tp->fw_needed = FIRMWARE_TG3TSO;
14028         }
14029
14030         /* Selectively allow TSO based on operating conditions */
14031         if (tg3_flag(tp, HW_TSO_1) ||
14032             tg3_flag(tp, HW_TSO_2) ||
14033             tg3_flag(tp, HW_TSO_3) ||
14034             tp->fw_needed) {
14035                 /* For firmware TSO, assume ASF is disabled.
14036                  * We'll disable TSO later if we discover ASF
14037                  * is enabled in tg3_get_eeprom_hw_cfg().
14038                  */
14039                 tg3_flag_set(tp, TSO_CAPABLE);
14040         } else {
14041                 tg3_flag_clear(tp, TSO_CAPABLE);
14042                 tg3_flag_clear(tp, TSO_BUG);
14043                 tp->fw_needed = NULL;
14044         }
14045
14046         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14047                 tp->fw_needed = FIRMWARE_TG3;
14048
14049         tp->irq_max = 1;
14050
14051         if (tg3_flag(tp, 5750_PLUS)) {
14052                 tg3_flag_set(tp, SUPPORT_MSI);
14053                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14054                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14055                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14056                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14057                      tp->pdev_peer == tp->pdev))
14058                         tg3_flag_clear(tp, SUPPORT_MSI);
14059
14060                 if (tg3_flag(tp, 5755_PLUS) ||
14061                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14062                         tg3_flag_set(tp, 1SHOT_MSI);
14063                 }
14064
14065                 if (tg3_flag(tp, 57765_PLUS)) {
14066                         tg3_flag_set(tp, SUPPORT_MSIX);
14067                         tp->irq_max = TG3_IRQ_MAX_VECS;
14068                 }
14069         }
14070
14071         if (tg3_flag(tp, 5755_PLUS) ||
14072             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14073                 tg3_flag_set(tp, SHORT_DMA_BUG);
14074
14075         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14076                 tg3_flag_set(tp, 4K_FIFO_LIMIT);
14077
14078         if (tg3_flag(tp, 5717_PLUS))
14079                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14080
14081         if (tg3_flag(tp, 57765_PLUS) &&
14082             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14083                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14084
14085         if (!tg3_flag(tp, 5705_PLUS) ||
14086             tg3_flag(tp, 5780_CLASS) ||
14087             tg3_flag(tp, USE_JUMBO_BDFLAG))
14088                 tg3_flag_set(tp, JUMBO_CAPABLE);
14089
14090         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14091                               &pci_state_reg);
14092
14093         if (pci_is_pcie(tp->pdev)) {
14094                 u16 lnkctl;
14095
14096                 tg3_flag_set(tp, PCI_EXPRESS);
14097
14098                 tp->pcie_readrq = 4096;
14099                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14100                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14101                         tp->pcie_readrq = 2048;
14102
14103                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
14104
14105                 pci_read_config_word(tp->pdev,
14106                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14107                                      &lnkctl);
14108                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14109                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14110                             ASIC_REV_5906) {
14111                                 tg3_flag_clear(tp, HW_TSO_2);
14112                                 tg3_flag_clear(tp, TSO_CAPABLE);
14113                         }
14114                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14115                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14116                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14117                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14118                                 tg3_flag_set(tp, CLKREQ_BUG);
14119                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14120                         tg3_flag_set(tp, L1PLLPD_EN);
14121                 }
14122         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14123                 /* BCM5785 devices are effectively PCIe devices, and should
14124                  * follow PCIe codepaths, but do not have a PCIe capabilities
14125                  * section.
14126                  */
14127                 tg3_flag_set(tp, PCI_EXPRESS);
14128         } else if (!tg3_flag(tp, 5705_PLUS) ||
14129                    tg3_flag(tp, 5780_CLASS)) {
14130                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14131                 if (!tp->pcix_cap) {
14132                         dev_err(&tp->pdev->dev,
14133                                 "Cannot find PCI-X capability, aborting\n");
14134                         return -EIO;
14135                 }
14136
14137                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14138                         tg3_flag_set(tp, PCIX_MODE);
14139         }
14140
14141         /* If we have an AMD 762 or VIA K8T800 chipset, write
14142          * reordering to the mailbox registers done by the host
14143          * controller can cause major troubles.  We read back from
14144          * every mailbox register write to force the writes to be
14145          * posted to the chip in order.
14146          */
14147         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14148             !tg3_flag(tp, PCI_EXPRESS))
14149                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14150
14151         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14152                              &tp->pci_cacheline_sz);
14153         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14154                              &tp->pci_lat_timer);
14155         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14156             tp->pci_lat_timer < 64) {
14157                 tp->pci_lat_timer = 64;
14158                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14159                                       tp->pci_lat_timer);
14160         }
14161
14162         /* Important! -- It is critical that the PCI-X hw workaround
14163          * situation is decided before the first MMIO register access.
14164          */
14165         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14166                 /* 5700 BX chips need to have their TX producer index
14167                  * mailboxes written twice to workaround a bug.
14168                  */
14169                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14170
14171                 /* If we are in PCI-X mode, enable register write workaround.
14172                  *
14173                  * The workaround is to use indirect register accesses
14174                  * for all chip writes not to mailbox registers.
14175                  */
14176                 if (tg3_flag(tp, PCIX_MODE)) {
14177                         u32 pm_reg;
14178
14179                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14180
14181                         /* The chip can have it's power management PCI config
14182                          * space registers clobbered due to this bug.
14183                          * So explicitly force the chip into D0 here.
14184                          */
14185                         pci_read_config_dword(tp->pdev,
14186                                               tp->pm_cap + PCI_PM_CTRL,
14187                                               &pm_reg);
14188                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14189                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14190                         pci_write_config_dword(tp->pdev,
14191                                                tp->pm_cap + PCI_PM_CTRL,
14192                                                pm_reg);
14193
14194                         /* Also, force SERR#/PERR# in PCI command. */
14195                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14196                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14197                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14198                 }
14199         }
14200
14201         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14202                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14203         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14204                 tg3_flag_set(tp, PCI_32BIT);
14205
14206         /* Chip-specific fixup from Broadcom driver */
14207         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14208             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14209                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14210                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14211         }
14212
14213         /* Default fast path register access methods */
14214         tp->read32 = tg3_read32;
14215         tp->write32 = tg3_write32;
14216         tp->read32_mbox = tg3_read32;
14217         tp->write32_mbox = tg3_write32;
14218         tp->write32_tx_mbox = tg3_write32;
14219         tp->write32_rx_mbox = tg3_write32;
14220
14221         /* Various workaround register access methods */
14222         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14223                 tp->write32 = tg3_write_indirect_reg32;
14224         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14225                  (tg3_flag(tp, PCI_EXPRESS) &&
14226                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14227                 /*
14228                  * Back to back register writes can cause problems on these
14229                  * chips, the workaround is to read back all reg writes
14230                  * except those to mailbox regs.
14231                  *
14232                  * See tg3_write_indirect_reg32().
14233                  */
14234                 tp->write32 = tg3_write_flush_reg32;
14235         }
14236
14237         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14238                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14239                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14240                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14241         }
14242
14243         if (tg3_flag(tp, ICH_WORKAROUND)) {
14244                 tp->read32 = tg3_read_indirect_reg32;
14245                 tp->write32 = tg3_write_indirect_reg32;
14246                 tp->read32_mbox = tg3_read_indirect_mbox;
14247                 tp->write32_mbox = tg3_write_indirect_mbox;
14248                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14249                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14250
14251                 iounmap(tp->regs);
14252                 tp->regs = NULL;
14253
14254                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14255                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14256                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14257         }
14258         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14259                 tp->read32_mbox = tg3_read32_mbox_5906;
14260                 tp->write32_mbox = tg3_write32_mbox_5906;
14261                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14262                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14263         }
14264
14265         if (tp->write32 == tg3_write_indirect_reg32 ||
14266             (tg3_flag(tp, PCIX_MODE) &&
14267              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14268               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14269                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14270
14271         /* The memory arbiter has to be enabled in order for SRAM accesses
14272          * to succeed.  Normally on powerup the tg3 chip firmware will make
14273          * sure it is enabled, but other entities such as system netboot
14274          * code might disable it.
14275          */
14276         val = tr32(MEMARB_MODE);
14277         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14278
14279         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14280         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14281             tg3_flag(tp, 5780_CLASS)) {
14282                 if (tg3_flag(tp, PCIX_MODE)) {
14283                         pci_read_config_dword(tp->pdev,
14284                                               tp->pcix_cap + PCI_X_STATUS,
14285                                               &val);
14286                         tp->pci_fn = val & 0x7;
14287                 }
14288         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14289                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14290                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14291                     NIC_SRAM_CPMUSTAT_SIG) {
14292                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14293                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14294                 }
14295         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14296                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14297                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14298                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14299                     NIC_SRAM_CPMUSTAT_SIG) {
14300                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14301                                      TG3_CPMU_STATUS_FSHFT_5719;
14302                 }
14303         }
14304
14305         /* Get eeprom hw config before calling tg3_set_power_state().
14306          * In particular, the TG3_FLAG_IS_NIC flag must be
14307          * determined before calling tg3_set_power_state() so that
14308          * we know whether or not to switch out of Vaux power.
14309          * When the flag is set, it means that GPIO1 is used for eeprom
14310          * write protect and also implies that it is a LOM where GPIOs
14311          * are not used to switch power.
14312          */
14313         tg3_get_eeprom_hw_cfg(tp);
14314
14315         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14316                 tg3_flag_clear(tp, TSO_CAPABLE);
14317                 tg3_flag_clear(tp, TSO_BUG);
14318                 tp->fw_needed = NULL;
14319         }
14320
14321         if (tg3_flag(tp, ENABLE_APE)) {
14322                 /* Allow reads and writes to the
14323                  * APE register and memory space.
14324                  */
14325                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14326                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14327                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14328                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14329                                        pci_state_reg);
14330
14331                 tg3_ape_lock_init(tp);
14332         }
14333
14334         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14335             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14336             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14337             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14338             tg3_flag(tp, 57765_PLUS))
14339                 tg3_flag_set(tp, CPMU_PRESENT);
14340
14341         /* Set up tp->grc_local_ctrl before calling
14342          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14343          * will bring 5700's external PHY out of reset.
14344          * It is also used as eeprom write protect on LOMs.
14345          */
14346         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14347         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14348             tg3_flag(tp, EEPROM_WRITE_PROT))
14349                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14350                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14351         /* Unused GPIO3 must be driven as output on 5752 because there
14352          * are no pull-up resistors on unused GPIO pins.
14353          */
14354         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14355                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14356
14357         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14358             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14359             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
14360                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14361
14362         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14363             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14364                 /* Turn off the debug UART. */
14365                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14366                 if (tg3_flag(tp, IS_NIC))
14367                         /* Keep VMain power. */
14368                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14369                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14370         }
14371
14372         /* Switch out of Vaux if it is a NIC */
14373         tg3_pwrsrc_switch_to_vmain(tp);
14374
14375         /* Derive initial jumbo mode from MTU assigned in
14376          * ether_setup() via the alloc_etherdev() call
14377          */
14378         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14379                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14380
14381         /* Determine WakeOnLan speed to use. */
14382         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14383             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14384             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14385             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14386                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14387         } else {
14388                 tg3_flag_set(tp, WOL_SPEED_100MB);
14389         }
14390
14391         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14392                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14393
14394         /* A few boards don't want Ethernet@WireSpeed phy feature */
14395         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14396             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14397              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14398              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14399             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14400             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14401                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14402
14403         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14404             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14405                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14406         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14407                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14408
14409         if (tg3_flag(tp, 5705_PLUS) &&
14410             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14411             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14412             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14413             !tg3_flag(tp, 57765_PLUS)) {
14414                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14415                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14416                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14417                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14418                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14419                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14420                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14421                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14422                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14423                 } else
14424                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14425         }
14426
14427         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14428             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14429                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14430                 if (tp->phy_otp == 0)
14431                         tp->phy_otp = TG3_OTP_DEFAULT;
14432         }
14433
14434         if (tg3_flag(tp, CPMU_PRESENT))
14435                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14436         else
14437                 tp->mi_mode = MAC_MI_MODE_BASE;
14438
14439         tp->coalesce_mode = 0;
14440         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14441             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14442                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14443
14444         /* Set these bits to enable statistics workaround. */
14445         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14446             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14447             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14448                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14449                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14450         }
14451
14452         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14453             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14454                 tg3_flag_set(tp, USE_PHYLIB);
14455
14456         err = tg3_mdio_init(tp);
14457         if (err)
14458                 return err;
14459
14460         /* Initialize data/descriptor byte/word swapping. */
14461         val = tr32(GRC_MODE);
14462         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14463                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14464                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14465                         GRC_MODE_B2HRX_ENABLE |
14466                         GRC_MODE_HTX2B_ENABLE |
14467                         GRC_MODE_HOST_STACKUP);
14468         else
14469                 val &= GRC_MODE_HOST_STACKUP;
14470
14471         tw32(GRC_MODE, val | tp->grc_mode);
14472
14473         tg3_switch_clocks(tp);
14474
14475         /* Clear this out for sanity. */
14476         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14477
14478         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14479                               &pci_state_reg);
14480         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14481             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14482                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14483
14484                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14485                     chiprevid == CHIPREV_ID_5701_B0 ||
14486                     chiprevid == CHIPREV_ID_5701_B2 ||
14487                     chiprevid == CHIPREV_ID_5701_B5) {
14488                         void __iomem *sram_base;
14489
14490                         /* Write some dummy words into the SRAM status block
14491                          * area, see if it reads back correctly.  If the return
14492                          * value is bad, force enable the PCIX workaround.
14493                          */
14494                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14495
14496                         writel(0x00000000, sram_base);
14497                         writel(0x00000000, sram_base + 4);
14498                         writel(0xffffffff, sram_base + 4);
14499                         if (readl(sram_base) != 0x00000000)
14500                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14501                 }
14502         }
14503
14504         udelay(50);
14505         tg3_nvram_init(tp);
14506
14507         grc_misc_cfg = tr32(GRC_MISC_CFG);
14508         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14509
14510         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14511             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14512              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14513                 tg3_flag_set(tp, IS_5788);
14514
14515         if (!tg3_flag(tp, IS_5788) &&
14516             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14517                 tg3_flag_set(tp, TAGGED_STATUS);
14518         if (tg3_flag(tp, TAGGED_STATUS)) {
14519                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14520                                       HOSTCC_MODE_CLRTICK_TXBD);
14521
14522                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14523                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14524                                        tp->misc_host_ctrl);
14525         }
14526
14527         /* Preserve the APE MAC_MODE bits */
14528         if (tg3_flag(tp, ENABLE_APE))
14529                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14530         else
14531                 tp->mac_mode = 0;
14532
14533         /* these are limited to 10/100 only */
14534         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14535              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14536             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14537              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14538              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14539               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14540               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14541             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14542              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14543               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14544               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14545             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14546             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14547             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14548             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14549                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14550
14551         err = tg3_phy_probe(tp);
14552         if (err) {
14553                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14554                 /* ... but do not return immediately ... */
14555                 tg3_mdio_fini(tp);
14556         }
14557
14558         tg3_read_vpd(tp);
14559         tg3_read_fw_ver(tp);
14560
14561         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14562                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14563         } else {
14564                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14565                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14566                 else
14567                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14568         }
14569
14570         /* 5700 {AX,BX} chips have a broken status block link
14571          * change bit implementation, so we must use the
14572          * status register in those cases.
14573          */
14574         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14575                 tg3_flag_set(tp, USE_LINKCHG_REG);
14576         else
14577                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14578
14579         /* The led_ctrl is set during tg3_phy_probe, here we might
14580          * have to force the link status polling mechanism based
14581          * upon subsystem IDs.
14582          */
14583         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14584             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14585             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14586                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14587                 tg3_flag_set(tp, USE_LINKCHG_REG);
14588         }
14589
14590         /* For all SERDES we poll the MAC status register. */
14591         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14592                 tg3_flag_set(tp, POLL_SERDES);
14593         else
14594                 tg3_flag_clear(tp, POLL_SERDES);
14595
14596         tp->rx_offset = NET_IP_ALIGN;
14597         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14598         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14599             tg3_flag(tp, PCIX_MODE)) {
14600                 tp->rx_offset = 0;
14601 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14602                 tp->rx_copy_thresh = ~(u16)0;
14603 #endif
14604         }
14605
14606         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14607         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14608         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14609
14610         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14611
14612         /* Increment the rx prod index on the rx std ring by at most
14613          * 8 for these chips to workaround hw errata.
14614          */
14615         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14616             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14617             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14618                 tp->rx_std_max_post = 8;
14619
14620         if (tg3_flag(tp, ASPM_WORKAROUND))
14621                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14622                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14623
14624         return err;
14625 }
14626
14627 #ifdef CONFIG_SPARC
14628 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14629 {
14630         struct net_device *dev = tp->dev;
14631         struct pci_dev *pdev = tp->pdev;
14632         struct device_node *dp = pci_device_to_OF_node(pdev);
14633         const unsigned char *addr;
14634         int len;
14635
14636         addr = of_get_property(dp, "local-mac-address", &len);
14637         if (addr && len == 6) {
14638                 memcpy(dev->dev_addr, addr, 6);
14639                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14640                 return 0;
14641         }
14642         return -ENODEV;
14643 }
14644
14645 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14646 {
14647         struct net_device *dev = tp->dev;
14648
14649         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14650         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14651         return 0;
14652 }
14653 #endif
14654
14655 static int __devinit tg3_get_device_address(struct tg3 *tp)
14656 {
14657         struct net_device *dev = tp->dev;
14658         u32 hi, lo, mac_offset;
14659         int addr_ok = 0;
14660
14661 #ifdef CONFIG_SPARC
14662         if (!tg3_get_macaddr_sparc(tp))
14663                 return 0;
14664 #endif
14665
14666         mac_offset = 0x7c;
14667         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14668             tg3_flag(tp, 5780_CLASS)) {
14669                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14670                         mac_offset = 0xcc;
14671                 if (tg3_nvram_lock(tp))
14672                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14673                 else
14674                         tg3_nvram_unlock(tp);
14675         } else if (tg3_flag(tp, 5717_PLUS)) {
14676                 if (tp->pci_fn & 1)
14677                         mac_offset = 0xcc;
14678                 if (tp->pci_fn > 1)
14679                         mac_offset += 0x18c;
14680         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14681                 mac_offset = 0x10;
14682
14683         /* First try to get it from MAC address mailbox. */
14684         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14685         if ((hi >> 16) == 0x484b) {
14686                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14687                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14688
14689                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14690                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14691                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14692                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14693                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14694
14695                 /* Some old bootcode may report a 0 MAC address in SRAM */
14696                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14697         }
14698         if (!addr_ok) {
14699                 /* Next, try NVRAM. */
14700                 if (!tg3_flag(tp, NO_NVRAM) &&
14701                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14702                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14703                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14704                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14705                 }
14706                 /* Finally just fetch it out of the MAC control regs. */
14707                 else {
14708                         hi = tr32(MAC_ADDR_0_HIGH);
14709                         lo = tr32(MAC_ADDR_0_LOW);
14710
14711                         dev->dev_addr[5] = lo & 0xff;
14712                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14713                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14714                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14715                         dev->dev_addr[1] = hi & 0xff;
14716                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14717                 }
14718         }
14719
14720         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14721 #ifdef CONFIG_SPARC
14722                 if (!tg3_get_default_macaddr_sparc(tp))
14723                         return 0;
14724 #endif
14725                 return -EINVAL;
14726         }
14727         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14728         return 0;
14729 }
14730
14731 #define BOUNDARY_SINGLE_CACHELINE       1
14732 #define BOUNDARY_MULTI_CACHELINE        2
14733
14734 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14735 {
14736         int cacheline_size;
14737         u8 byte;
14738         int goal;
14739
14740         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14741         if (byte == 0)
14742                 cacheline_size = 1024;
14743         else
14744                 cacheline_size = (int) byte * 4;
14745
14746         /* On 5703 and later chips, the boundary bits have no
14747          * effect.
14748          */
14749         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14750             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14751             !tg3_flag(tp, PCI_EXPRESS))
14752                 goto out;
14753
14754 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14755         goal = BOUNDARY_MULTI_CACHELINE;
14756 #else
14757 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14758         goal = BOUNDARY_SINGLE_CACHELINE;
14759 #else
14760         goal = 0;
14761 #endif
14762 #endif
14763
14764         if (tg3_flag(tp, 57765_PLUS)) {
14765                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14766                 goto out;
14767         }
14768
14769         if (!goal)
14770                 goto out;
14771
14772         /* PCI controllers on most RISC systems tend to disconnect
14773          * when a device tries to burst across a cache-line boundary.
14774          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14775          *
14776          * Unfortunately, for PCI-E there are only limited
14777          * write-side controls for this, and thus for reads
14778          * we will still get the disconnects.  We'll also waste
14779          * these PCI cycles for both read and write for chips
14780          * other than 5700 and 5701 which do not implement the
14781          * boundary bits.
14782          */
14783         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14784                 switch (cacheline_size) {
14785                 case 16:
14786                 case 32:
14787                 case 64:
14788                 case 128:
14789                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14790                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14791                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14792                         } else {
14793                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14794                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14795                         }
14796                         break;
14797
14798                 case 256:
14799                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14800                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14801                         break;
14802
14803                 default:
14804                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14805                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14806                         break;
14807                 }
14808         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14809                 switch (cacheline_size) {
14810                 case 16:
14811                 case 32:
14812                 case 64:
14813                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14814                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14815                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14816                                 break;
14817                         }
14818                         /* fallthrough */
14819                 case 128:
14820                 default:
14821                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14822                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14823                         break;
14824                 }
14825         } else {
14826                 switch (cacheline_size) {
14827                 case 16:
14828                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14829                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14830                                         DMA_RWCTRL_WRITE_BNDRY_16);
14831                                 break;
14832                         }
14833                         /* fallthrough */
14834                 case 32:
14835                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14836                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14837                                         DMA_RWCTRL_WRITE_BNDRY_32);
14838                                 break;
14839                         }
14840                         /* fallthrough */
14841                 case 64:
14842                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14843                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14844                                         DMA_RWCTRL_WRITE_BNDRY_64);
14845                                 break;
14846                         }
14847                         /* fallthrough */
14848                 case 128:
14849                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14850                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14851                                         DMA_RWCTRL_WRITE_BNDRY_128);
14852                                 break;
14853                         }
14854                         /* fallthrough */
14855                 case 256:
14856                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14857                                 DMA_RWCTRL_WRITE_BNDRY_256);
14858                         break;
14859                 case 512:
14860                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14861                                 DMA_RWCTRL_WRITE_BNDRY_512);
14862                         break;
14863                 case 1024:
14864                 default:
14865                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14866                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14867                         break;
14868                 }
14869         }
14870
14871 out:
14872         return val;
14873 }
14874
14875 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14876 {
14877         struct tg3_internal_buffer_desc test_desc;
14878         u32 sram_dma_descs;
14879         int i, ret;
14880
14881         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
14882
14883         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14884         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14885         tw32(RDMAC_STATUS, 0);
14886         tw32(WDMAC_STATUS, 0);
14887
14888         tw32(BUFMGR_MODE, 0);
14889         tw32(FTQ_RESET, 0);
14890
14891         test_desc.addr_hi = ((u64) buf_dma) >> 32;
14892         test_desc.addr_lo = buf_dma & 0xffffffff;
14893         test_desc.nic_mbuf = 0x00002100;
14894         test_desc.len = size;
14895
14896         /*
14897          * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
14898          * the *second* time the tg3 driver was getting loaded after an
14899          * initial scan.
14900          *
14901          * Broadcom tells me:
14902          *   ...the DMA engine is connected to the GRC block and a DMA
14903          *   reset may affect the GRC block in some unpredictable way...
14904          *   The behavior of resets to individual blocks has not been tested.
14905          *
14906          * Broadcom noted the GRC reset will also reset all sub-components.
14907          */
14908         if (to_device) {
14909                 test_desc.cqid_sqid = (13 << 8) | 2;
14910
14911                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14912                 udelay(40);
14913         } else {
14914                 test_desc.cqid_sqid = (16 << 8) | 7;
14915
14916                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14917                 udelay(40);
14918         }
14919         test_desc.flags = 0x00000005;
14920
14921         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14922                 u32 val;
14923
14924                 val = *(((u32 *)&test_desc) + i);
14925                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14926                                        sram_dma_descs + (i * sizeof(u32)));
14927                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14928         }
14929         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14930
14931         if (to_device)
14932                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14933         else
14934                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
14935
14936         ret = -ENODEV;
14937         for (i = 0; i < 40; i++) {
14938                 u32 val;
14939
14940                 if (to_device)
14941                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14942                 else
14943                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14944                 if ((val & 0xffff) == sram_dma_descs) {
14945                         ret = 0;
14946                         break;
14947                 }
14948
14949                 udelay(100);
14950         }
14951
14952         return ret;
14953 }
14954
14955 #define TEST_BUFFER_SIZE        0x2000
14956
14957 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
14958         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14959         { },
14960 };
14961
14962 static int __devinit tg3_test_dma(struct tg3 *tp)
14963 {
14964         dma_addr_t buf_dma;
14965         u32 *buf, saved_dma_rwctrl;
14966         int ret = 0;
14967
14968         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14969                                  &buf_dma, GFP_KERNEL);
14970         if (!buf) {
14971                 ret = -ENOMEM;
14972                 goto out_nofree;
14973         }
14974
14975         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14976                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14977
14978         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14979
14980         if (tg3_flag(tp, 57765_PLUS))
14981                 goto out;
14982
14983         if (tg3_flag(tp, PCI_EXPRESS)) {
14984                 /* DMA read watermark not used on PCIE */
14985                 tp->dma_rwctrl |= 0x00180000;
14986         } else if (!tg3_flag(tp, PCIX_MODE)) {
14987                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14988                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14989                         tp->dma_rwctrl |= 0x003f0000;
14990                 else
14991                         tp->dma_rwctrl |= 0x003f000f;
14992         } else {
14993                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14994                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14995                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14996                         u32 read_water = 0x7;
14997
14998                         /* If the 5704 is behind the EPB bridge, we can
14999                          * do the less restrictive ONE_DMA workaround for
15000                          * better performance.
15001                          */
15002                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15003                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15004                                 tp->dma_rwctrl |= 0x8000;
15005                         else if (ccval == 0x6 || ccval == 0x7)
15006                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15007
15008                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15009                                 read_water = 4;
15010                         /* Set bit 23 to enable PCIX hw bug fix */
15011                         tp->dma_rwctrl |=
15012                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15013                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15014                                 (1 << 23);
15015                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15016                         /* 5780 always in PCIX mode */
15017                         tp->dma_rwctrl |= 0x00144000;
15018                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15019                         /* 5714 always in PCIX mode */
15020                         tp->dma_rwctrl |= 0x00148000;
15021                 } else {
15022                         tp->dma_rwctrl |= 0x001b000f;
15023                 }
15024         }
15025
15026         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15027             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15028                 tp->dma_rwctrl &= 0xfffffff0;
15029
15030         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15031             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15032                 /* Remove this if it causes problems for some boards. */
15033                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15034
15035                 /* On 5700/5701 chips, we need to set this bit.
15036                  * Otherwise the chip will issue cacheline transactions
15037                  * to streamable DMA memory with not all the byte
15038                  * enables turned on.  This is an error on several
15039                  * RISC PCI controllers, in particular sparc64.
15040                  *
15041                  * On 5703/5704 chips, this bit has been reassigned
15042                  * a different meaning.  In particular, it is used
15043                  * on those chips to enable a PCI-X workaround.
15044                  */
15045                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15046         }
15047
15048         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15049
15050 #if 0
15051         /* Unneeded, already done by tg3_get_invariants.  */
15052         tg3_switch_clocks(tp);
15053 #endif
15054
15055         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15056             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15057                 goto out;
15058
15059         /* It is best to perform the DMA test with the maximum write burst size
15060          * to expose the 5700/5701 write DMA bug.
15061          */
15062         saved_dma_rwctrl = tp->dma_rwctrl;
15063         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15064         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15065
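        /*
         * Fill the buffer with a known pattern, DMA it to the card and
         * back, and verify the result.  On the first corruption, retry
         * with the 16-byte write boundary; corruption at that setting
         * is fatal.
         */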
15066         while (1) {
15067                 u32 *p = buf, i;
15068
15069                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15070                         p[i] = i;
15071
15072                 /* Send the buffer to the chip. */
15073                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15074                 if (ret) {
15075                         dev_err(&tp->pdev->dev,
15076                                 "%s: Buffer write failed. err = %d\n",
15077                                 __func__, ret);
15078                         break;
15079                 }
15080
15081 #if 0
15082                 /* Validate that the data reached card RAM correctly. */
15083                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15084                         u32 val;
15085                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15086                         if (le32_to_cpu(val) != p[i]) {
15087                                 dev_err(&tp->pdev->dev,
15088                                         "%s: Buffer corrupted on device! "
15089                                         "(%d != %d)\n", __func__, le32_to_cpu(val), i);
15090                                 /* ret = -ENODEV here? */
15091                         }
15092                         p[i] = 0;
15093                 }
15094 #endif
15095                 /* Now read it back. */
15096                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15097                 if (ret) {
15098                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15099                                 "err = %d\n", __func__, ret);
15100                         break;
15101                 }
15102
15103                 /* Verify it. */
15104                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15105                         if (p[i] == i)
15106                                 continue;
15107
15108                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15109                             DMA_RWCTRL_WRITE_BNDRY_16) {
15110                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15111                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15112                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15113                                 break;
15114                         } else {
15115                                 dev_err(&tp->pdev->dev,
15116                                         "%s: Buffer corrupted on read back! "
15117                                         "(%d != %d)\n", __func__, p[i], i);
15118                                 ret = -ENODEV;
15119                                 goto out;
15120                         }
15121                 }
15122
15123                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15124                         /* Success. */
15125                         ret = 0;
15126                         break;
15127                 }
15128         }
15129         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15130             DMA_RWCTRL_WRITE_BNDRY_16) {
15131                 /* DMA test passed without adjusting the DMA boundary;
15132                  * now look for chipsets that are known to expose the
15133                  * DMA bug without failing the test.
15134                  */
15135                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15136                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15137                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15138                 } else {
15139                         /* Safe to use the calculated DMA boundary. */
15140                         tp->dma_rwctrl = saved_dma_rwctrl;
15141                 }
15142
15143                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15144         }
15145
15146 out:
15147         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15148 out_nofree:
15149         return ret;
15150 }
15151
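/* Choose the per-chip buffer manager watermark defaults.  Standard and
 * jumbo MTU settings get separate mbuf thresholds, with overrides for
 * the 57765, 5705/5906, and 5780 chip families.
 */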
15152 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15153 {
15154         if (tg3_flag(tp, 57765_PLUS)) {
15155                 tp->bufmgr_config.mbuf_read_dma_low_water =
15156                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15157                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15158                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15159                 tp->bufmgr_config.mbuf_high_water =
15160                         DEFAULT_MB_HIGH_WATER_57765;
15161
15162                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15163                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15164                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15165                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15166                 tp->bufmgr_config.mbuf_high_water_jumbo =
15167                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15168         } else if (tg3_flag(tp, 5705_PLUS)) {
15169                 tp->bufmgr_config.mbuf_read_dma_low_water =
15170                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15171                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15172                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15173                 tp->bufmgr_config.mbuf_high_water =
15174                         DEFAULT_MB_HIGH_WATER_5705;
15175                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15176                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15177                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15178                         tp->bufmgr_config.mbuf_high_water =
15179                                 DEFAULT_MB_HIGH_WATER_5906;
15180                 }
15181
15182                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15183                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15184                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15185                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15186                 tp->bufmgr_config.mbuf_high_water_jumbo =
15187                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15188         } else {
15189                 tp->bufmgr_config.mbuf_read_dma_low_water =
15190                         DEFAULT_MB_RDMA_LOW_WATER;
15191                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15192                         DEFAULT_MB_MACRX_LOW_WATER;
15193                 tp->bufmgr_config.mbuf_high_water =
15194                         DEFAULT_MB_HIGH_WATER;
15195
15196                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15197                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15198                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15199                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15200                 tp->bufmgr_config.mbuf_high_water_jumbo =
15201                         DEFAULT_MB_HIGH_WATER_JUMBO;
15202         }
15203
15204         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15205         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15206 }
15207
15208 static char * __devinit tg3_phy_string(struct tg3 *tp)
15209 {
15210         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15211         case TG3_PHY_ID_BCM5400:        return "5400";
15212         case TG3_PHY_ID_BCM5401:        return "5401";
15213         case TG3_PHY_ID_BCM5411:        return "5411";
15214         case TG3_PHY_ID_BCM5701:        return "5701";
15215         case TG3_PHY_ID_BCM5703:        return "5703";
15216         case TG3_PHY_ID_BCM5704:        return "5704";
15217         case TG3_PHY_ID_BCM5705:        return "5705";
15218         case TG3_PHY_ID_BCM5750:        return "5750";
15219         case TG3_PHY_ID_BCM5752:        return "5752";
15220         case TG3_PHY_ID_BCM5714:        return "5714";
15221         case TG3_PHY_ID_BCM5780:        return "5780";
15222         case TG3_PHY_ID_BCM5755:        return "5755";
15223         case TG3_PHY_ID_BCM5787:        return "5787";
15224         case TG3_PHY_ID_BCM5784:        return "5784";
15225         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15226         case TG3_PHY_ID_BCM5906:        return "5906";
15227         case TG3_PHY_ID_BCM5761:        return "5761";
15228         case TG3_PHY_ID_BCM5718C:       return "5718C";
15229         case TG3_PHY_ID_BCM5718S:       return "5718S";
15230         case TG3_PHY_ID_BCM57765:       return "57765";
15231         case TG3_PHY_ID_BCM5719C:       return "5719C";
15232         case TG3_PHY_ID_BCM5720C:       return "5720C";
15233         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15234         case 0:                 return "serdes";
15235         default:                return "unknown";
15236         }
15237 }
15238
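/* Format a human-readable bus description such as "PCI Express",
 * "PCIX:133MHz:64-bit", or "PCI:66MHz:32-bit".  The caller supplies
 * the buffer (see the str[] array in tg3_init_one()).
 */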
15239 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15240 {
15241         if (tg3_flag(tp, PCI_EXPRESS)) {
15242                 strcpy(str, "PCI Express");
15243                 return str;
15244         } else if (tg3_flag(tp, PCIX_MODE)) {
15245                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15246
15247                 strcpy(str, "PCIX:");
15248
15249                 if ((clock_ctrl == 7) ||
15250                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15251                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15252                         strcat(str, "133MHz");
15253                 else if (clock_ctrl == 0)
15254                         strcat(str, "33MHz");
15255                 else if (clock_ctrl == 2)
15256                         strcat(str, "50MHz");
15257                 else if (clock_ctrl == 4)
15258                         strcat(str, "66MHz");
15259                 else if (clock_ctrl == 6)
15260                         strcat(str, "100MHz");
15261         } else {
15262                 strcpy(str, "PCI:");
15263                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15264                         strcat(str, "66MHz");
15265                 else
15266                         strcat(str, "33MHz");
15267         }
15268         if (tg3_flag(tp, PCI_32BIT))
15269                 strcat(str, ":32-bit");
15270         else
15271                 strcat(str, ":64-bit");
15272         return str;
15273 }
15274
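/* Locate the other PCI function of a dual-port device: mask off the
 * function number and scan all eight functions in the slot for a
 * device other than ourselves.
 */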
15275 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
15276 {
15277         struct pci_dev *peer;
15278         unsigned int func, devnr = tp->pdev->devfn & ~7;
15279
15280         for (func = 0; func < 8; func++) {
15281                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15282                 if (peer && peer != tp->pdev)
15283                         break;
15284                 pci_dev_put(peer);
15285         }
15286         /* The 5704 can be configured in single-port mode; set peer to
15287          * tp->pdev in that case.
15288          */
15289         if (!peer) {
15290                 peer = tp->pdev;
15291                 return peer;
15292         }
15293
15294         /*
15295          * We don't need to keep the refcount elevated; there's no way
15296          * to remove one half of this device without removing the other.
15297          */
15298         pci_dev_put(peer);
15299
15300         return peer;
15301 }
15302
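/* Fill in the default ethtool coalescing parameters.  Chips using the
 * CLRTICK host-coalescing modes get adjusted tick values, and on 5705
 * and later chips the per-IRQ and statistics block fields are cleared.
 */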
15303 static void __devinit tg3_init_coal(struct tg3 *tp)
15304 {
15305         struct ethtool_coalesce *ec = &tp->coal;
15306
15307         memset(ec, 0, sizeof(*ec));
15308         ec->cmd = ETHTOOL_GCOALESCE;
15309         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15310         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15311         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15312         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15313         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15314         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15315         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15316         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15317         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15318
15319         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15320                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15321                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15322                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15323                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15324                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15325         }
15326
15327         if (tg3_flag(tp, 5705_PLUS)) {
15328                 ec->rx_coalesce_usecs_irq = 0;
15329                 ec->tx_coalesce_usecs_irq = 0;
15330                 ec->stats_block_coalesce_usecs = 0;
15331         }
15332 }
15333
15334 static const struct net_device_ops tg3_netdev_ops = {
15335         .ndo_open               = tg3_open,
15336         .ndo_stop               = tg3_close,
15337         .ndo_start_xmit         = tg3_start_xmit,
15338         .ndo_get_stats64        = tg3_get_stats64,
15339         .ndo_validate_addr      = eth_validate_addr,
15340         .ndo_set_rx_mode        = tg3_set_rx_mode,
15341         .ndo_set_mac_address    = tg3_set_mac_addr,
15342         .ndo_do_ioctl           = tg3_ioctl,
15343         .ndo_tx_timeout         = tg3_tx_timeout,
15344         .ndo_change_mtu         = tg3_change_mtu,
15345         .ndo_fix_features       = tg3_fix_features,
15346         .ndo_set_features       = tg3_set_features,
15347 #ifdef CONFIG_NET_POLL_CONTROLLER
15348         .ndo_poll_controller    = tg3_poll_controller,
15349 #endif
15350 };
15351
15352 static int __devinit tg3_init_one(struct pci_dev *pdev,
15353                                   const struct pci_device_id *ent)
15354 {
15355         struct net_device *dev;
15356         struct tg3 *tp;
15357         int i, err, pm_cap;
15358         u32 sndmbx, rcvmbx, intmbx;
15359         char str[40];
15360         u64 dma_mask, persist_dma_mask;
15361         u32 features = 0;
15362
15363         printk_once(KERN_INFO "%s\n", version);
15364
15365         err = pci_enable_device(pdev);
15366         if (err) {
15367                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15368                 return err;
15369         }
15370
15371         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15372         if (err) {
15373                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15374                 goto err_out_disable_pdev;
15375         }
15376
15377         pci_set_master(pdev);
15378
15379         /* Find power-management capability. */
15380         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15381         if (pm_cap == 0) {
15382                 dev_err(&pdev->dev,
15383                         "Cannot find Power Management capability, aborting\n");
15384                 err = -EIO;
15385                 goto err_out_free_res;
15386         }
15387
15388         err = pci_set_power_state(pdev, PCI_D0);
15389         if (err) {
15390                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15391                 goto err_out_free_res;
15392         }
15393
15394         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15395         if (!dev) {
15396                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
15397                 err = -ENOMEM;
15398                 goto err_out_power_down;
15399         }
15400
15401         SET_NETDEV_DEV(dev, &pdev->dev);
15402
15403         tp = netdev_priv(dev);
15404         tp->pdev = pdev;
15405         tp->dev = dev;
15406         tp->pm_cap = pm_cap;
15407         tp->rx_mode = TG3_DEF_RX_MODE;
15408         tp->tx_mode = TG3_DEF_TX_MODE;
15409         tp->irq_sync = 1;
15410
15411         if (tg3_debug > 0)
15412                 tp->msg_enable = tg3_debug;
15413         else
15414                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15415
15416         /* The word/byte swap controls here control register access byte
15417          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15418          * setting below.
15419          */
15420         tp->misc_host_ctrl =
15421                 MISC_HOST_CTRL_MASK_PCI_INT |
15422                 MISC_HOST_CTRL_WORD_SWAP |
15423                 MISC_HOST_CTRL_INDIR_ACCESS |
15424                 MISC_HOST_CTRL_PCISTATE_RW;
15425
15426         /* The NONFRM (non-frame) byte/word swap controls take effect
15427          * on descriptor entries, i.e. anything which isn't packet data.
15428          *
15429          * The StrongARM chips on the board (one for tx, one for rx)
15430          * are running in big-endian mode.
15431          */
15432         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15433                         GRC_MODE_WSWAP_NONFRM_DATA);
15434 #ifdef __BIG_ENDIAN
15435         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15436 #endif
15437         spin_lock_init(&tp->lock);
15438         spin_lock_init(&tp->indirect_lock);
15439         INIT_WORK(&tp->reset_task, tg3_reset_task);
15440
15441         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15442         if (!tp->regs) {
15443                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15444                 err = -ENOMEM;
15445                 goto err_out_free_dev;
15446         }
15447
15448         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15449             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15450             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15451             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15452             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15453             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15454             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15455             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15456                 tg3_flag_set(tp, ENABLE_APE);
15457                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15458                 if (!tp->aperegs) {
15459                         dev_err(&pdev->dev,
15460                                 "Cannot map APE registers, aborting\n");
15461                         err = -ENOMEM;
15462                         goto err_out_iounmap;
15463                 }
15464         }
15465
15466         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15467         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15468
15469         dev->ethtool_ops = &tg3_ethtool_ops;
15470         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15471         dev->netdev_ops = &tg3_netdev_ops;
15472         dev->irq = pdev->irq;
15473
15474         err = tg3_get_invariants(tp);
15475         if (err) {
15476                 dev_err(&pdev->dev,
15477                         "Problem fetching invariants of chip, aborting\n");
15478                 goto err_out_apeunmap;
15479         }
15480
15481         /* The EPB bridge inside 5714, 5715, and 5780 and any
15482          * device behind the EPB cannot support DMA addresses > 40-bit.
15483          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15484          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15485          * do DMA address check in tg3_start_xmit().
15486          */
15487         if (tg3_flag(tp, IS_5788))
15488                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15489         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15490                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15491 #ifdef CONFIG_HIGHMEM
15492                 dma_mask = DMA_BIT_MASK(64);
15493 #endif
15494         } else
15495                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15496
15497         /* Configure DMA attributes. */
15498         if (dma_mask > DMA_BIT_MASK(32)) {
15499                 err = pci_set_dma_mask(pdev, dma_mask);
15500                 if (!err) {
15501                         features |= NETIF_F_HIGHDMA;
15502                         err = pci_set_consistent_dma_mask(pdev,
15503                                                           persist_dma_mask);
15504                         if (err < 0) {
15505                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15506                                         "DMA for consistent allocations\n");
15507                                 goto err_out_apeunmap;
15508                         }
15509                 }
15510         }
15511         if (err || dma_mask == DMA_BIT_MASK(32)) {
15512                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15513                 if (err) {
15514                         dev_err(&pdev->dev,
15515                                 "No usable DMA configuration, aborting\n");
15516                         goto err_out_apeunmap;
15517                 }
15518         }
15519
15520         tg3_init_bufmgr_config(tp);
15521
15522         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15523
15524         /* 5700 B0 chips do not support checksumming correctly due
15525          * to hardware bugs.
15526          */
15527         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15528                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15529
15530                 if (tg3_flag(tp, 5755_PLUS))
15531                         features |= NETIF_F_IPV6_CSUM;
15532         }
15533
15534         /* TSO is on by default on chips that support hardware TSO.
15535          * Firmware TSO on older chips gives lower performance, so it
15536          * is off by default, but can be enabled using ethtool.
15537          */
15538         if ((tg3_flag(tp, HW_TSO_1) ||
15539              tg3_flag(tp, HW_TSO_2) ||
15540              tg3_flag(tp, HW_TSO_3)) &&
15541             (features & NETIF_F_IP_CSUM))
15542                 features |= NETIF_F_TSO;
15543         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15544                 if (features & NETIF_F_IPV6_CSUM)
15545                         features |= NETIF_F_TSO6;
15546                 if (tg3_flag(tp, HW_TSO_3) ||
15547                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15548                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15549                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15550                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15551                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15552                         features |= NETIF_F_TSO_ECN;
15553         }
15554
15555         dev->features |= features;
15556         dev->vlan_features |= features;
15557
15558         /*
15559          * Add loopback capability only for a subset of devices that support
15560          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15561          * loopback for the remaining devices.
15562          */
15563         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15564             !tg3_flag(tp, CPMU_PRESENT))
15565                 /* Add the loopback capability */
15566                 features |= NETIF_F_LOOPBACK;
15567
15568         dev->hw_features |= features;
15569
15570         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15571             !tg3_flag(tp, TSO_CAPABLE) &&
15572             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15573                 tg3_flag_set(tp, MAX_RXPEND_64);
15574                 tp->rx_pending = 63;
15575         }
15576
15577         err = tg3_get_device_address(tp);
15578         if (err) {
15579                 dev_err(&pdev->dev,
15580                         "Could not obtain valid ethernet address, aborting\n");
15581                 goto err_out_apeunmap;
15582         }
15583
15584         /*
15585          * Reset the chip in case the UNDI or EFI driver did not shut
15586          * it down.  The DMA self test will enable WDMAC and we'll see
15587          * (spurious) pending DMA on the PCI bus at that point.
15588          */
15589         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15590             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15591                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15592                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15593         }
15594
15595         err = tg3_test_dma(tp);
15596         if (err) {
15597                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15598                 goto err_out_apeunmap;
15599         }
15600
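        /*
         * Compute the interrupt, receive-return consumer, and send
         * producer mailbox addresses for each interrupt vector; the
         * loop below hands them to the per-vector NAPI contexts.
         */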
15601         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15602         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15603         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15604         for (i = 0; i < tp->irq_max; i++) {
15605                 struct tg3_napi *tnapi = &tp->napi[i];
15606
15607                 tnapi->tp = tp;
15608                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15609
15610                 tnapi->int_mbox = intmbx;
15611                 if (i <= 4)
15612                         intmbx += 0x8;
15613                 else
15614                         intmbx += 0x4;
15615
15616                 tnapi->consmbox = rcvmbx;
15617                 tnapi->prodmbox = sndmbx;
15618
15619                 if (i)
15620                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15621                 else
15622                         tnapi->coal_now = HOSTCC_MODE_NOW;
15623
15624                 if (!tg3_flag(tp, SUPPORT_MSIX))
15625                         break;
15626
15627                 /*
15628                  * If we support MSIX, we'll be using RSS.  If we're using
15629                  * RSS, the first vector only handles link interrupts and the
15630                  * remaining vectors handle rx and tx interrupts.  Reuse the
15631                  * mailbox values for the next iteration.  The values we setup
15632                  * above are still useful for the single vectored mode.
15633                  */
15634                 if (!i)
15635                         continue;
15636
15637                 rcvmbx += 0x8;
15638
15639                 if (sndmbx & 0x4)
15640                         sndmbx -= 0x4;
15641                 else
15642                         sndmbx += 0xc;
15643         }
15644
15645         tg3_init_coal(tp);
15646
15647         pci_set_drvdata(pdev, dev);
15648
15649         if (tg3_flag(tp, 5717_PLUS)) {
15650                 /* Resume a low-power mode */
15651                 tg3_frob_aux_power(tp, false);
15652         }
15653
15654         err = register_netdev(dev);
15655         if (err) {
15656                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15657                 goto err_out_apeunmap;
15658         }
15659
15660         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15661                     tp->board_part_number,
15662                     tp->pci_chip_rev_id,
15663                     tg3_bus_string(tp, str),
15664                     dev->dev_addr);
15665
15666         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15667                 struct phy_device *phydev;
15668                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15669                 netdev_info(dev,
15670                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15671                             phydev->drv->name, dev_name(&phydev->dev));
15672         } else {
15673                 char *ethtype;
15674
15675                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15676                         ethtype = "10/100Base-TX";
15677                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15678                         ethtype = "1000Base-SX";
15679                 else
15680                         ethtype = "10/100/1000Base-T";
15681
15682                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15683                             "(WireSpeed[%d], EEE[%d])\n",
15684                             tg3_phy_string(tp), ethtype,
15685                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15686                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15687         }
15688
15689         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15690                     (dev->features & NETIF_F_RXCSUM) != 0,
15691                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15692                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15693                     tg3_flag(tp, ENABLE_ASF) != 0,
15694                     tg3_flag(tp, TSO_CAPABLE) != 0);
15695         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15696                     tp->dma_rwctrl,
15697                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15698                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15699
15700         pci_save_state(pdev);
15701
15702         return 0;
15703
15704 err_out_apeunmap:
15705         if (tp->aperegs) {
15706                 iounmap(tp->aperegs);
15707                 tp->aperegs = NULL;
15708         }
15709
15710 err_out_iounmap:
15711         if (tp->regs) {
15712                 iounmap(tp->regs);
15713                 tp->regs = NULL;
15714         }
15715
15716 err_out_free_dev:
15717         free_netdev(dev);
15718
15719 err_out_power_down:
15720         pci_set_power_state(pdev, PCI_D3hot);
15721
15722 err_out_free_res:
15723         pci_release_regions(pdev);
15724
15725 err_out_disable_pdev:
15726         pci_disable_device(pdev);
15727         pci_set_drvdata(pdev, NULL);
15728         return err;
15729 }
15730
15731 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15732 {
15733         struct net_device *dev = pci_get_drvdata(pdev);
15734
15735         if (dev) {
15736                 struct tg3 *tp = netdev_priv(dev);
15737
15738                 if (tp->fw)
15739                         release_firmware(tp->fw);
15740
15741                 tg3_reset_task_cancel(tp);
15742
15743                 if (tg3_flag(tp, USE_PHYLIB)) {
15744                         tg3_phy_fini(tp);
15745                         tg3_mdio_fini(tp);
15746                 }
15747
15748                 unregister_netdev(dev);
15749                 if (tp->aperegs) {
15750                         iounmap(tp->aperegs);
15751                         tp->aperegs = NULL;
15752                 }
15753                 if (tp->regs) {
15754                         iounmap(tp->regs);
15755                         tp->regs = NULL;
15756                 }
15757                 free_netdev(dev);
15758                 pci_release_regions(pdev);
15759                 pci_disable_device(pdev);
15760                 pci_set_drvdata(pdev, NULL);
15761         }
15762 }
15763
15764 #ifdef CONFIG_PM_SLEEP
15765 static int tg3_suspend(struct device *device)
15766 {
15767         struct pci_dev *pdev = to_pci_dev(device);
15768         struct net_device *dev = pci_get_drvdata(pdev);
15769         struct tg3 *tp = netdev_priv(dev);
15770         int err;
15771
15772         if (!netif_running(dev))
15773                 return 0;
15774
15775         tg3_reset_task_cancel(tp);
15776         tg3_phy_stop(tp);
15777         tg3_netif_stop(tp);
15778
15779         del_timer_sync(&tp->timer);
15780
15781         tg3_full_lock(tp, 1);
15782         tg3_disable_ints(tp);
15783         tg3_full_unlock(tp);
15784
15785         netif_device_detach(dev);
15786
15787         tg3_full_lock(tp, 0);
15788         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15789         tg3_flag_clear(tp, INIT_COMPLETE);
15790         tg3_full_unlock(tp);
15791
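        /*
         * Prepare the chip for the low-power state.  If that fails,
         * undo the suspend: restart the hardware, rearm the timer, and
         * reattach the net device so the interface remains usable.
         */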
15792         err = tg3_power_down_prepare(tp);
15793         if (err) {
15794                 int err2;
15795
15796                 tg3_full_lock(tp, 0);
15797
15798                 tg3_flag_set(tp, INIT_COMPLETE);
15799                 err2 = tg3_restart_hw(tp, 1);
15800                 if (err2)
15801                         goto out;
15802
15803                 tp->timer.expires = jiffies + tp->timer_offset;
15804                 add_timer(&tp->timer);
15805
15806                 netif_device_attach(dev);
15807                 tg3_netif_start(tp);
15808
15809 out:
15810                 tg3_full_unlock(tp);
15811
15812                 if (!err2)
15813                         tg3_phy_start(tp);
15814         }
15815
15816         return err;
15817 }
15818
15819 static int tg3_resume(struct device *device)
15820 {
15821         struct pci_dev *pdev = to_pci_dev(device);
15822         struct net_device *dev = pci_get_drvdata(pdev);
15823         struct tg3 *tp = netdev_priv(dev);
15824         int err;
15825
15826         if (!netif_running(dev))
15827                 return 0;
15828
15829         netif_device_attach(dev);
15830
15831         tg3_full_lock(tp, 0);
15832
15833         tg3_flag_set(tp, INIT_COMPLETE);
15834         err = tg3_restart_hw(tp, 1);
15835         if (err)
15836                 goto out;
15837
15838         tp->timer.expires = jiffies + tp->timer_offset;
15839         add_timer(&tp->timer);
15840
15841         tg3_netif_start(tp);
15842
15843 out:
15844         tg3_full_unlock(tp);
15845
15846         if (!err)
15847                 tg3_phy_start(tp);
15848
15849         return err;
15850 }
15851
15852 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15853 #define TG3_PM_OPS (&tg3_pm_ops)
15854
15855 #else
15856
15857 #define TG3_PM_OPS NULL
15858
15859 #endif /* CONFIG_PM_SLEEP */
15860
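/*
 * PCI error recovery (AER/EEH) callbacks.  The PCI core invokes
 * .error_detected when a bus error is reported, .slot_reset after the
 * slot has been reset, and .resume once normal traffic may restart;
 * see the tg3_err_handler table near the end of this file.
 */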
15861 /**
15862  * tg3_io_error_detected - called when PCI error is detected
15863  * @pdev: Pointer to PCI device
15864  * @state: The current pci connection state
15865  *
15866  * This function is called after a PCI bus error affecting
15867  * this device has been detected.
15868  */
15869 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15870                                               pci_channel_state_t state)
15871 {
15872         struct net_device *netdev = pci_get_drvdata(pdev);
15873         struct tg3 *tp = netdev_priv(netdev);
15874         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15875
15876         netdev_info(netdev, "PCI I/O error detected\n");
15877
15878         rtnl_lock();
15879
15880         if (!netif_running(netdev))
15881                 goto done;
15882
15883         tg3_phy_stop(tp);
15884
15885         tg3_netif_stop(tp);
15886
15887         del_timer_sync(&tp->timer);
15888
15889         /* Want to make sure that the reset task doesn't run */
15890         tg3_reset_task_cancel(tp);
15891         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
15892
15893         netif_device_detach(netdev);
15894
15895         /* Clean up software state, even if MMIO is blocked */
15896         tg3_full_lock(tp, 0);
15897         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15898         tg3_full_unlock(tp);
15899
15900 done:
15901         if (state == pci_channel_io_perm_failure)
15902                 err = PCI_ERS_RESULT_DISCONNECT;
15903         else
15904                 pci_disable_device(pdev);
15905
15906         rtnl_unlock();
15907
15908         return err;
15909 }
15910
15911 /**
15912  * tg3_io_slot_reset - called after the pci bus has been reset.
15913  * @pdev: Pointer to PCI device
15914  *
15915  * Restart the card from scratch, as if from a cold-boot.
15916  * At this point, the card has experienced a hard reset,
15917  * followed by fixups by BIOS, and has its config space
15918  * set up identically to what it was at cold boot.
15919  */
15920 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15921 {
15922         struct net_device *netdev = pci_get_drvdata(pdev);
15923         struct tg3 *tp = netdev_priv(netdev);
15924         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
15925         int err;
15926
15927         rtnl_lock();
15928
15929         if (pci_enable_device(pdev)) {
15930                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
15931                 goto done;
15932         }
15933
15934         pci_set_master(pdev);
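        /*
         * Re-apply the config space captured by pci_save_state() at
         * probe time, then snapshot it again so a later reset starts
         * from the same state.
         */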
15935         pci_restore_state(pdev);
15936         pci_save_state(pdev);
15937
15938         if (!netif_running(netdev)) {
15939                 rc = PCI_ERS_RESULT_RECOVERED;
15940                 goto done;
15941         }
15942
15943         err = tg3_power_up(tp);
15944         if (err)
15945                 goto done;
15946
15947         rc = PCI_ERS_RESULT_RECOVERED;
15948
15949 done:
15950         rtnl_unlock();
15951
15952         return rc;
15953 }
15954
15955 /**
15956  * tg3_io_resume - called when traffic can start flowing again.
15957  * @pdev: Pointer to PCI device
15958  *
15959  * This callback is called when the error recovery driver tells
15960  * us that it's OK to resume normal operation.
15961  */
15962 static void tg3_io_resume(struct pci_dev *pdev)
15963 {
15964         struct net_device *netdev = pci_get_drvdata(pdev);
15965         struct tg3 *tp = netdev_priv(netdev);
15966         int err;
15967
15968         rtnl_lock();
15969
15970         if (!netif_running(netdev))
15971                 goto done;
15972
15973         tg3_full_lock(tp, 0);
15974         tg3_flag_set(tp, INIT_COMPLETE);
15975         err = tg3_restart_hw(tp, 1);
15976         tg3_full_unlock(tp);
15977         if (err) {
15978                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
15979                 goto done;
15980         }
15981
15982         netif_device_attach(netdev);
15983
15984         tp->timer.expires = jiffies + tp->timer_offset;
15985         add_timer(&tp->timer);
15986
15987         tg3_netif_start(tp);
15988
15989         tg3_phy_start(tp);
15990
15991 done:
15992         rtnl_unlock();
15993 }
15994
15995 static struct pci_error_handlers tg3_err_handler = {
15996         .error_detected = tg3_io_error_detected,
15997         .slot_reset     = tg3_io_slot_reset,
15998         .resume         = tg3_io_resume
15999 };
16000
16001 static struct pci_driver tg3_driver = {
16002         .name           = DRV_MODULE_NAME,
16003         .id_table       = tg3_pci_tbl,
16004         .probe          = tg3_init_one,
16005         .remove         = __devexit_p(tg3_remove_one),
16006         .err_handler    = &tg3_err_handler,
16007         .driver.pm      = TG3_PM_OPS,
16008 };
16009
16010 static int __init tg3_init(void)
16011 {
16012         return pci_register_driver(&tg3_driver);
16013 }
16014
16015 static void __exit tg3_cleanup(void)
16016 {
16017         pci_unregister_driver(&tg3_driver);
16018 }
16019
16020 module_init(tg3_init);
16021 module_exit(tg3_cleanup);