/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     122
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "December 7, 2011"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

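/* Indirect register accessors: the target register is addressed through
 * the PCI configuration-space window (TG3PCI_REG_BASE_ADDR /
 * TG3PCI_REG_DATA) instead of the memory-mapped BAR.
 */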
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

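/* TX mailbox write: the value is written twice on chips with the
 * TXD_MBOX_HWBUG erratum, and read back to flush the posted write on
 * hosts that may reorder mailbox writes.
 */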
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

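/* Access NIC on-chip SRAM through the memory window.  Accesses to the
 * statistics block range are skipped on the 5906 (reads return 0), and
 * the window base is always restored to zero afterwards.
 */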
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't have any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

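/* Request one of the APE hardware mutexes.  The grant register is polled
 * for up to 1 ms; on timeout the request is revoked and -EBUSY returned.
 */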
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
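                /* else: fall through */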
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
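                /* else: fall through */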
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

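/* Mask chip interrupts: set the PCI interrupt mask bit in misc host
 * control, then write 1 to every vector's interrupt mailbox.
 */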
static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts.
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

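/* Adjust the core clock via TG3PCI_CLOCK_CTRL.  This is a no-op on
 * CPMU-equipped and 5780-class chips.
 */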
static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

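/* Clock a read frame out the MII management interface.  Autopolling is
 * temporarily disabled while the frame is in flight, and the MI_COM busy
 * bit is polled for up to PHY_BUSY_LOOPS iterations.
 */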
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

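/* Indirect Clause 45 register access through the Clause 22 MMD access
 * control registers (MII_TG3_MMD_CTRL / MII_TG3_MMD_ADDRESS).
 */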
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

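/* Enable/disable the PHY's SM DSP clock via the aux control shadow
 * register.  DSP register accesses elsewhere in the driver are bracketed
 * by these two macros.
 */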
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

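/* Determine the PHY address (a function of the PCI function number on
 * 5717-plus parts) and, when phylib is in use, allocate and register
 * the MDIO bus.
 */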
static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
         */
1350         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1351                 tg3_bmcr_reset(tp);
1352
1353         i = mdiobus_register(tp->mdio_bus);
1354         if (i) {
1355                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1356                 mdiobus_free(tp->mdio_bus);
1357                 return i;
1358         }
1359
1360         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1361
1362         if (!phydev || !phydev->drv) {
1363                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1364                 mdiobus_unregister(tp->mdio_bus);
1365                 mdiobus_free(tp->mdio_bus);
1366                 return -ENODEV;
1367         }
1368
1369         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1370         case PHY_ID_BCM57780:
1371                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1372                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1373                 break;
1374         case PHY_ID_BCM50610:
1375         case PHY_ID_BCM50610M:
1376                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1377                                      PHY_BRCM_RX_REFCLK_UNUSED |
1378                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1379                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1380                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1381                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1382                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1383                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1384                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1385                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1386                 /* fallthru */
1387         case PHY_ID_RTL8211C:
1388                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1389                 break;
1390         case PHY_ID_RTL8201E:
1391         case PHY_ID_BCMAC131:
1392                 phydev->interface = PHY_INTERFACE_MODE_MII;
1393                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1394                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1395                 break;
1396         }
1397
1398         tg3_flag_set(tp, MDIOBUS_INITED);
1399
1400         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1401                 tg3_mdio_config_5785(tp);
1402
1403         return 0;
1404 }
1405
1406 static void tg3_mdio_fini(struct tg3 *tp)
1407 {
1408         if (tg3_flag(tp, MDIOBUS_INITED)) {
1409                 tg3_flag_clear(tp, MDIOBUS_INITED);
1410                 mdiobus_unregister(tp->mdio_bus);
1411                 mdiobus_free(tp->mdio_bus);
1412         }
1413 }
1414
1415 /* tp->lock is held. */
1416 static inline void tg3_generate_fw_event(struct tg3 *tp)
1417 {
1418         u32 val;
1419
1420         val = tr32(GRC_RX_CPU_EVENT);
1421         val |= GRC_RX_CPU_DRIVER_EVENT;
1422         tw32_f(GRC_RX_CPU_EVENT, val);
1423
1424         tp->last_event_jiffies = jiffies;
1425 }
1426
1427 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1428
1429 /* tp->lock is held. */
1430 static void tg3_wait_for_event_ack(struct tg3 *tp)
1431 {
1432         int i;
1433         unsigned int delay_cnt;
1434         long time_remain;
1435
1436         /* If enough time has passed, no wait is necessary. */
1437         time_remain = (long)(tp->last_event_jiffies + 1 +
1438                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1439                       (long)jiffies;
1440         if (time_remain < 0)
1441                 return;
1442
1443         /* Check if we can shorten the wait time. */
1444         delay_cnt = jiffies_to_usecs(time_remain);
1445         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1446                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1447         delay_cnt = (delay_cnt >> 3) + 1;
1448
1449         for (i = 0; i < delay_cnt; i++) {
1450                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1451                         break;
1452                 udelay(8);
1453         }
1454 }
1455
1456 /* tp->lock is held. */
1457 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1458 {
1459         u32 reg, val;
1460
1461         val = 0;
1462         if (!tg3_readphy(tp, MII_BMCR, &reg))
1463                 val = reg << 16;
1464         if (!tg3_readphy(tp, MII_BMSR, &reg))
1465                 val |= (reg & 0xffff);
1466         *data++ = val;
1467
1468         val = 0;
1469         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1470                 val = reg << 16;
1471         if (!tg3_readphy(tp, MII_LPA, &reg))
1472                 val |= (reg & 0xffff);
1473         *data++ = val;
1474
1475         val = 0;
1476         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1477                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1478                         val = reg << 16;
1479                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1480                         val |= (reg & 0xffff);
1481         }
1482         *data++ = val;
1483
1484         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1485                 val = reg << 16;
1486         else
1487                 val = 0;
1488         *data++ = val;
1489 }
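/* Packing note (editorial): each u32 above carries two 16-bit MII
 * registers, control in the high half and status in the low half,
 * e.g. data[0] == (BMCR << 16) | (BMSR & 0xffff).  The four words
 * cover basic control/status, autoneg advertise/partner, 1000BASE-T
 * control/status (skipped for MII serdes), and the PHY address.
 */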
1490
1491 /* tp->lock is held. */
1492 static void tg3_ump_link_report(struct tg3 *tp)
1493 {
1494         u32 data[4];
1495
1496         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1497                 return;
1498
1499         tg3_phy_gather_ump_data(tp, data);
1500
1501         tg3_wait_for_event_ack(tp);
1502
1503         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1504         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1505         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1506         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1507         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1508         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1509
1510         tg3_generate_fw_event(tp);
1511 }
1512
1513 /* tp->lock is held. */
1514 static void tg3_stop_fw(struct tg3 *tp)
1515 {
1516         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1517                 /* Wait for RX cpu to ACK the previous event. */
1518                 tg3_wait_for_event_ack(tp);
1519
1520                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1521
1522                 tg3_generate_fw_event(tp);
1523
1524                 /* Wait for RX cpu to ACK this event. */
1525                 tg3_wait_for_event_ack(tp);
1526         }
1527 }
1528
1529 /* tp->lock is held. */
1530 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1531 {
1532         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1533                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1534
1535         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1536                 switch (kind) {
1537                 case RESET_KIND_INIT:
1538                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1539                                       DRV_STATE_START);
1540                         break;
1541
1542                 case RESET_KIND_SHUTDOWN:
1543                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1544                                       DRV_STATE_UNLOAD);
1545                         break;
1546
1547                 case RESET_KIND_SUSPEND:
1548                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1549                                       DRV_STATE_SUSPEND);
1550                         break;
1551
1552                 default:
1553                         break;
1554                 }
1555         }
1556
1557         if (kind == RESET_KIND_INIT ||
1558             kind == RESET_KIND_SUSPEND)
1559                 tg3_ape_driver_state_change(tp, kind);
1560 }
1561
1562 /* tp->lock is held. */
1563 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1564 {
1565         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1566                 switch (kind) {
1567                 case RESET_KIND_INIT:
1568                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1569                                       DRV_STATE_START_DONE);
1570                         break;
1571
1572                 case RESET_KIND_SHUTDOWN:
1573                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1574                                       DRV_STATE_UNLOAD_DONE);
1575                         break;
1576
1577                 default:
1578                         break;
1579                 }
1580         }
1581
1582         if (kind == RESET_KIND_SHUTDOWN)
1583                 tg3_ape_driver_state_change(tp, kind);
1584 }
1585
1586 /* tp->lock is held. */
1587 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1588 {
1589         if (tg3_flag(tp, ENABLE_ASF)) {
1590                 switch (kind) {
1591                 case RESET_KIND_INIT:
1592                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1593                                       DRV_STATE_START);
1594                         break;
1595
1596                 case RESET_KIND_SHUTDOWN:
1597                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1598                                       DRV_STATE_UNLOAD);
1599                         break;
1600
1601                 case RESET_KIND_SUSPEND:
1602                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1603                                       DRV_STATE_SUSPEND);
1604                         break;
1605
1606                 default:
1607                         break;
1608                 }
1609         }
1610 }
1611
1612 static int tg3_poll_fw(struct tg3 *tp)
1613 {
1614         int i;
1615         u32 val;
1616
1617         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1618                 /* Wait up to 20ms for init done. */
1619                 for (i = 0; i < 200; i++) {
1620                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1621                                 return 0;
1622                         udelay(100);
1623                 }
1624                 return -ENODEV;
1625         }
1626
1627         /* Wait for firmware initialization to complete. */
1628         for (i = 0; i < 100000; i++) {
1629                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1630                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1631                         break;
1632                 udelay(10);
1633         }
1634
1635         /* Chip might not be fitted with firmware.  Some Sun onboard
1636          * parts are configured like that.  So don't signal the timeout
1637          * of the above loop as an error, but do report the lack of
1638          * running firmware once.
1639          */
1640         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1641                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1642
1643                 netdev_info(tp->dev, "No firmware running\n");
1644         }
1645
1646         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1647                 /* The 57765 A0 needs a little more
1648                  * time to do some important work.
1649                  */
1650                 mdelay(10);
1651         }
1652
1653         return 0;
1654 }
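/* Timing note (editorial): the 5906 path polls 200 * 100 us = 20 ms
 * for VCPU_STATUS_INIT_DONE; the generic path polls up to
 * 100000 * 10 us = 1 s for the firmware mailbox to change to
 * ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1 before "No firmware running" is
 * reported (once) and the probe continues anyway.
 */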
1655
1656 static void tg3_link_report(struct tg3 *tp)
1657 {
1658         if (!netif_carrier_ok(tp->dev)) {
1659                 netif_info(tp, link, tp->dev, "Link is down\n");
1660                 tg3_ump_link_report(tp);
1661         } else if (netif_msg_link(tp)) {
1662                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1663                             (tp->link_config.active_speed == SPEED_1000 ?
1664                              1000 :
1665                              (tp->link_config.active_speed == SPEED_100 ?
1666                               100 : 10)),
1667                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1668                              "full" : "half"));
1669
1670                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1671                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1672                             "on" : "off",
1673                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1674                             "on" : "off");
1675
1676                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1677                         netdev_info(tp->dev, "EEE is %s\n",
1678                                     tp->setlpicnt ? "enabled" : "disabled");
1679
1680                 tg3_ump_link_report(tp);
1681         }
1682 }
1683
1684 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1685 {
1686         u16 miireg;
1687
1688         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1689                 miireg = ADVERTISE_1000XPAUSE;
1690         else if (flow_ctrl & FLOW_CTRL_TX)
1691                 miireg = ADVERTISE_1000XPSE_ASYM;
1692         else if (flow_ctrl & FLOW_CTRL_RX)
1693                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1694         else
1695                 miireg = 0;
1696
1697         return miireg;
1698 }
1699
1700 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1701 {
1702         u8 cap = 0;
1703
1704         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1705                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1706         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1707                 if (lcladv & ADVERTISE_1000XPAUSE)
1708                         cap = FLOW_CTRL_RX;
1709                 if (rmtadv & ADVERTISE_1000XPAUSE)
1710                         cap = FLOW_CTRL_TX;
1711         }
1712
1713         return cap;
1714 }
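/* Resolution sketch (editorial, mirrors the code above): symmetric
 * PAUSE on both ends resolves to FLOW_CTRL_TX | FLOW_CTRL_RX.  When
 * only ASYM_PAUSE is common, a local PAUSE bit yields FLOW_CTRL_RX
 * and a remote PAUSE bit yields FLOW_CTRL_TX; the two cases cannot
 * both hold here, since that combination is caught by the first test.
 * Every other combination resolves to no flow control.
 */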
1715
1716 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1717 {
1718         u8 autoneg;
1719         u8 flowctrl = 0;
1720         u32 old_rx_mode = tp->rx_mode;
1721         u32 old_tx_mode = tp->tx_mode;
1722
1723         if (tg3_flag(tp, USE_PHYLIB))
1724                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1725         else
1726                 autoneg = tp->link_config.autoneg;
1727
1728         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1729                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1730                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1731                 else
1732                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1733         } else
1734                 flowctrl = tp->link_config.flowctrl;
1735
1736         tp->link_config.active_flowctrl = flowctrl;
1737
1738         if (flowctrl & FLOW_CTRL_RX)
1739                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1740         else
1741                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1742
1743         if (old_rx_mode != tp->rx_mode)
1744                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1745
1746         if (flowctrl & FLOW_CTRL_TX)
1747                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1748         else
1749                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1750
1751         if (old_tx_mode != tp->tx_mode)
1752                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1753 }
1754
1755 static void tg3_adjust_link(struct net_device *dev)
1756 {
1757         u8 oldflowctrl, linkmesg = 0;
1758         u32 mac_mode, lcl_adv, rmt_adv;
1759         struct tg3 *tp = netdev_priv(dev);
1760         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1761
1762         spin_lock_bh(&tp->lock);
1763
1764         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1765                                     MAC_MODE_HALF_DUPLEX);
1766
1767         oldflowctrl = tp->link_config.active_flowctrl;
1768
1769         if (phydev->link) {
1770                 lcl_adv = 0;
1771                 rmt_adv = 0;
1772
1773                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1774                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1775                 else if (phydev->speed == SPEED_1000 ||
1776                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1777                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1778                 else
1779                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1780
1781                 if (phydev->duplex == DUPLEX_HALF)
1782                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1783                 else {
1784                         lcl_adv = mii_advertise_flowctrl(
1785                                   tp->link_config.flowctrl);
1786
1787                         if (phydev->pause)
1788                                 rmt_adv = LPA_PAUSE_CAP;
1789                         if (phydev->asym_pause)
1790                                 rmt_adv |= LPA_PAUSE_ASYM;
1791                 }
1792
1793                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1794         } else
1795                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1796
1797         if (mac_mode != tp->mac_mode) {
1798                 tp->mac_mode = mac_mode;
1799                 tw32_f(MAC_MODE, tp->mac_mode);
1800                 udelay(40);
1801         }
1802
1803         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1804                 if (phydev->speed == SPEED_10)
1805                         tw32(MAC_MI_STAT,
1806                              MAC_MI_STAT_10MBPS_MODE |
1807                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1808                 else
1809                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1810         }
1811
1812         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1813                 tw32(MAC_TX_LENGTHS,
1814                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1815                       (6 << TX_LENGTHS_IPG_SHIFT) |
1816                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1817         else
1818                 tw32(MAC_TX_LENGTHS,
1819                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1820                       (6 << TX_LENGTHS_IPG_SHIFT) |
1821                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1822
1823         if (phydev->link != tp->old_link ||
1824             phydev->speed != tp->link_config.active_speed ||
1825             phydev->duplex != tp->link_config.active_duplex ||
1826             oldflowctrl != tp->link_config.active_flowctrl)
1827                 linkmesg = 1;
1828
1829         tp->old_link = phydev->link;
1830         tp->link_config.active_speed = phydev->speed;
1831         tp->link_config.active_duplex = phydev->duplex;
1832
1833         spin_unlock_bh(&tp->lock);
1834
1835         if (linkmesg)
1836                 tg3_link_report(tp);
1837 }
1838
1839 static int tg3_phy_init(struct tg3 *tp)
1840 {
1841         struct phy_device *phydev;
1842
1843         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1844                 return 0;
1845
1846         /* Bring the PHY back to a known state. */
1847         tg3_bmcr_reset(tp);
1848
1849         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1850
1851         /* Attach the MAC to the PHY. */
1852         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1853                              phydev->dev_flags, phydev->interface);
1854         if (IS_ERR(phydev)) {
1855                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1856                 return PTR_ERR(phydev);
1857         }
1858
1859         /* Mask with MAC supported features. */
1860         switch (phydev->interface) {
1861         case PHY_INTERFACE_MODE_GMII:
1862         case PHY_INTERFACE_MODE_RGMII:
1863                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1864                         phydev->supported &= (PHY_GBIT_FEATURES |
1865                                               SUPPORTED_Pause |
1866                                               SUPPORTED_Asym_Pause);
1867                         break;
1868                 }
1869                 /* fallthru */
1870         case PHY_INTERFACE_MODE_MII:
1871                 phydev->supported &= (PHY_BASIC_FEATURES |
1872                                       SUPPORTED_Pause |
1873                                       SUPPORTED_Asym_Pause);
1874                 break;
1875         default:
1876                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1877                 return -EINVAL;
1878         }
1879
1880         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1881
1882         phydev->advertising = phydev->supported;
1883
1884         return 0;
1885 }
1886
1887 static void tg3_phy_start(struct tg3 *tp)
1888 {
1889         struct phy_device *phydev;
1890
1891         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1892                 return;
1893
1894         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1895
1896         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1897                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1898                 phydev->speed = tp->link_config.speed;
1899                 phydev->duplex = tp->link_config.duplex;
1900                 phydev->autoneg = tp->link_config.autoneg;
1901                 phydev->advertising = tp->link_config.advertising;
1902         }
1903
1904         phy_start(phydev);
1905
1906         phy_start_aneg(phydev);
1907 }
1908
1909 static void tg3_phy_stop(struct tg3 *tp)
1910 {
1911         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1912                 return;
1913
1914         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1915 }
1916
1917 static void tg3_phy_fini(struct tg3 *tp)
1918 {
1919         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1920                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1921                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1922         }
1923 }
1924
1925 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1926 {
1927         int err;
1928         u32 val;
1929
1930         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1931                 return 0;
1932
1933         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1934                 /* Cannot do read-modify-write on 5401 */
1935                 err = tg3_phy_auxctl_write(tp,
1936                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1937                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1938                                            0x4c20);
1939                 goto done;
1940         }
1941
1942         err = tg3_phy_auxctl_read(tp,
1943                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1944         if (err)
1945                 return err;
1946
1947         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1948         err = tg3_phy_auxctl_write(tp,
1949                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1950
1951 done:
1952         return err;
1953 }
1954
1955 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1956 {
1957         u32 phytest;
1958
1959         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1960                 u32 phy;
1961
1962                 tg3_writephy(tp, MII_TG3_FET_TEST,
1963                              phytest | MII_TG3_FET_SHADOW_EN);
1964                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1965                         if (enable)
1966                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1967                         else
1968                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1969                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1970                 }
1971                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1972         }
1973 }
1974
1975 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1976 {
1977         u32 reg;
1978
1979         if (!tg3_flag(tp, 5705_PLUS) ||
1980             (tg3_flag(tp, 5717_PLUS) &&
1981              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1982                 return;
1983
1984         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1985                 tg3_phy_fet_toggle_apd(tp, enable);
1986                 return;
1987         }
1988
1989         reg = MII_TG3_MISC_SHDW_WREN |
1990               MII_TG3_MISC_SHDW_SCR5_SEL |
1991               MII_TG3_MISC_SHDW_SCR5_LPED |
1992               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1993               MII_TG3_MISC_SHDW_SCR5_SDTL |
1994               MII_TG3_MISC_SHDW_SCR5_C125OE;
1995         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1996                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1997
1998         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1999
2000
2001         reg = MII_TG3_MISC_SHDW_WREN |
2002               MII_TG3_MISC_SHDW_APD_SEL |
2003               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2004         if (enable)
2005                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2006
2007         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2008 }
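/* Access-pattern note (editorial): MII_TG3_MISC_SHDW is a banked
 * shadow register, so each write above folds a write-enable bit, a
 * bank selector (SCR5 or APD) and the payload into a single MDIO
 * write; no read-modify-write cycle is needed.
 */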
2009
2010 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2011 {
2012         u32 phy;
2013
2014         if (!tg3_flag(tp, 5705_PLUS) ||
2015             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2016                 return;
2017
2018         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2019                 u32 ephy;
2020
2021                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2022                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2023
2024                         tg3_writephy(tp, MII_TG3_FET_TEST,
2025                                      ephy | MII_TG3_FET_SHADOW_EN);
2026                         if (!tg3_readphy(tp, reg, &phy)) {
2027                                 if (enable)
2028                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2029                                 else
2030                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2031                                 tg3_writephy(tp, reg, phy);
2032                         }
2033                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2034                 }
2035         } else {
2036                 int ret;
2037
2038                 ret = tg3_phy_auxctl_read(tp,
2039                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2040                 if (!ret) {
2041                         if (enable)
2042                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2043                         else
2044                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2045                         tg3_phy_auxctl_write(tp,
2046                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2047                 }
2048         }
2049 }
2050
2051 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2052 {
2053         int ret;
2054         u32 val;
2055
2056         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2057                 return;
2058
2059         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2060         if (!ret)
2061                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2062                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2063 }
2064
2065 static void tg3_phy_apply_otp(struct tg3 *tp)
2066 {
2067         u32 otp, phy;
2068
2069         if (!tp->phy_otp)
2070                 return;
2071
2072         otp = tp->phy_otp;
2073
2074         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2075                 return;
2076
2077         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2078         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2079         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2080
2081         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2082               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2083         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2084
2085         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2086         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2087         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2088
2089         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2090         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2091
2092         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2093         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2094
2095         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2096               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2097         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2098
2099         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2100 }
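/* Pattern note (editorial): every block above extracts one OTP field
 * as (otp & MASK) >> SHIFT, merges in any fixed DSP default bits, and
 * writes the result to the matching DSP tap with tg3_phydsp_write(),
 * all bracketed by the SMDSP enable/disable pair.
 */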
2101
2102 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2103 {
2104         u32 val;
2105
2106         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2107                 return;
2108
2109         tp->setlpicnt = 0;
2110
2111         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2112             current_link_up == 1 &&
2113             tp->link_config.active_duplex == DUPLEX_FULL &&
2114             (tp->link_config.active_speed == SPEED_100 ||
2115              tp->link_config.active_speed == SPEED_1000)) {
2116                 u32 eeectl;
2117
2118                 if (tp->link_config.active_speed == SPEED_1000)
2119                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2120                 else
2121                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2122
2123                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2124
2125                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2126                                   TG3_CL45_D7_EEERES_STAT, &val);
2127
2128                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2129                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2130                         tp->setlpicnt = 2;
2131         }
2132
2133         if (!tp->setlpicnt) {
2134                 if (current_link_up == 1 &&
2135                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2136                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2137                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2138                 }
2139
2140                 val = tr32(TG3_CPMU_EEE_MODE);
2141                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2142         }
2143 }
2144
2145 static void tg3_phy_eee_enable(struct tg3 *tp)
2146 {
2147         u32 val;
2148
2149         if (tp->link_config.active_speed == SPEED_1000 &&
2150             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2151              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2152              tg3_flag(tp, 57765_CLASS)) &&
2153             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2154                 val = MII_TG3_DSP_TAP26_ALNOKO |
2155                       MII_TG3_DSP_TAP26_RMRXSTO;
2156                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2157                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2158         }
2159
2160         val = tr32(TG3_CPMU_EEE_MODE);
2161         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2162 }
2163
2164 static int tg3_wait_macro_done(struct tg3 *tp)
2165 {
2166         int limit = 100;
2167
2168         while (limit--) {
2169                 u32 tmp32;
2170
2171                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2172                         if ((tmp32 & 0x1000) == 0)
2173                                 break;
2174                 }
2175         }
2176         if (limit < 0)
2177                 return -EBUSY;
2178
2179         return 0;
2180 }
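/* Timing note (editorial): the loop above issues up to 100 MDIO reads
 * with no explicit delay, so the poll interval is bounded only by the
 * MDIO transaction time; bit 0x1000 of MII_TG3_DSP_CONTROL serves as
 * the macro-busy flag.
 */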
2181
2182 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2183 {
2184         static const u32 test_pat[4][6] = {
2185         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2186         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2187         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2188         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2189         };
2190         int chan;
2191
2192         for (chan = 0; chan < 4; chan++) {
2193                 int i;
2194
2195                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2196                              (chan * 0x2000) | 0x0200);
2197                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2198
2199                 for (i = 0; i < 6; i++)
2200                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2201                                      test_pat[chan][i]);
2202
2203                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2204                 if (tg3_wait_macro_done(tp)) {
2205                         *resetp = 1;
2206                         return -EBUSY;
2207                 }
2208
2209                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2210                              (chan * 0x2000) | 0x0200);
2211                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2212                 if (tg3_wait_macro_done(tp)) {
2213                         *resetp = 1;
2214                         return -EBUSY;
2215                 }
2216
2217                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2218                 if (tg3_wait_macro_done(tp)) {
2219                         *resetp = 1;
2220                         return -EBUSY;
2221                 }
2222
2223                 for (i = 0; i < 6; i += 2) {
2224                         u32 low, high;
2225
2226                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2227                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2228                             tg3_wait_macro_done(tp)) {
2229                                 *resetp = 1;
2230                                 return -EBUSY;
2231                         }
2232                         low &= 0x7fff;
2233                         high &= 0x000f;
2234                         if (low != test_pat[chan][i] ||
2235                             high != test_pat[chan][i+1]) {
2236                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2237                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2238                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2239
2240                                 return -EBUSY;
2241                         }
2242                 }
2243         }
2244
2245         return 0;
2246 }
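/* Verification note (editorial): on readback only the low 15 bits of
 * the first word and the low 4 bits of the second are compared
 * (low &= 0x7fff, high &= 0x000f).  A mismatch triggers the
 * 0x4001/0x4005 recovery writes and returns -EBUSY so the caller's
 * retry loop runs again.
 */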
2247
2248 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2249 {
2250         int chan;
2251
2252         for (chan = 0; chan < 4; chan++) {
2253                 int i;
2254
2255                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2256                              (chan * 0x2000) | 0x0200);
2257                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2258                 for (i = 0; i < 6; i++)
2259                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2260                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2261                 if (tg3_wait_macro_done(tp))
2262                         return -EBUSY;
2263         }
2264
2265         return 0;
2266 }
2267
2268 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2269 {
2270         u32 reg32, phy9_orig;
2271         int retries, do_phy_reset, err;
2272
2273         retries = 10;
2274         do_phy_reset = 1;
2275         do {
2276                 if (do_phy_reset) {
2277                         err = tg3_bmcr_reset(tp);
2278                         if (err)
2279                                 return err;
2280                         do_phy_reset = 0;
2281                 }
2282
2283                 /* Disable transmitter and interrupt.  */
2284                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2285                         continue;
2286
2287                 reg32 |= 0x3000;
2288                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2289
2290                 /* Set full-duplex, 1000 Mbps.  */
2291                 tg3_writephy(tp, MII_BMCR,
2292                              BMCR_FULLDPLX | BMCR_SPEED1000);
2293
2294                 /* Set to master mode.  */
2295                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2296                         continue;
2297
2298                 tg3_writephy(tp, MII_CTRL1000,
2299                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2300
2301                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2302                 if (err)
2303                         return err;
2304
2305                 /* Block the PHY control access.  */
2306                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2307
2308                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2309                 if (!err)
2310                         break;
2311         } while (--retries);
2312
2313         err = tg3_phy_reset_chanpat(tp);
2314         if (err)
2315                 return err;
2316
2317         tg3_phydsp_write(tp, 0x8005, 0x0000);
2318
2319         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2320         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2321
2322         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2323
2324         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2325
2326         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2327                 reg32 &= ~0x3000;
2328                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2329         } else if (!err)
2330                 err = -EBUSY;
2331
2332         return err;
2333 }
2334
2335 /* This will unconditionally reset the tigon3 PHY and then apply
2336  * the chip-specific workarounds that must follow a PHY reset.
2337  */
2338 static int tg3_phy_reset(struct tg3 *tp)
2339 {
2340         u32 val, cpmuctrl;
2341         int err;
2342
2343         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2344                 val = tr32(GRC_MISC_CFG);
2345                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2346                 udelay(40);
2347         }
2348         err  = tg3_readphy(tp, MII_BMSR, &val);
2349         err |= tg3_readphy(tp, MII_BMSR, &val);
2350         if (err != 0)
2351                 return -EBUSY;
2352
2353         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2354                 netif_carrier_off(tp->dev);
2355                 tg3_link_report(tp);
2356         }
2357
2358         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2359             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2360             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2361                 err = tg3_phy_reset_5703_4_5(tp);
2362                 if (err)
2363                         return err;
2364                 goto out;
2365         }
2366
2367         cpmuctrl = 0;
2368         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2369             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2370                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2371                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2372                         tw32(TG3_CPMU_CTRL,
2373                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2374         }
2375
2376         err = tg3_bmcr_reset(tp);
2377         if (err)
2378                 return err;
2379
2380         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2381                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2382                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2383
2384                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2385         }
2386
2387         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2388             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2389                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2390                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2391                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2392                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2393                         udelay(40);
2394                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2395                 }
2396         }
2397
2398         if (tg3_flag(tp, 5717_PLUS) &&
2399             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2400                 return 0;
2401
2402         tg3_phy_apply_otp(tp);
2403
2404         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2405                 tg3_phy_toggle_apd(tp, true);
2406         else
2407                 tg3_phy_toggle_apd(tp, false);
2408
2409 out:
2410         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2411             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2412                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2413                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2414                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2415         }
2416
2417         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2418                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2419                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2420         }
2421
2422         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2423                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2424                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2425                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2426                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2427                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2428                 }
2429         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2430                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2431                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2432                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2433                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2434                                 tg3_writephy(tp, MII_TG3_TEST1,
2435                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2436                         } else
2437                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2438
2439                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2440                 }
2441         }
2442
2443         /* Set the extended packet length bit (bit 14) on all chips
2444          * that support jumbo frames. */
2445         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2446                 /* Cannot do read-modify-write on 5401 */
2447                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2448         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2449                 /* Set bit 14 with read-modify-write to preserve other bits */
2450                 err = tg3_phy_auxctl_read(tp,
2451                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2452                 if (!err)
2453                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2454                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2455         }
2456
2457         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2458          * jumbo frames transmission.
2459          */
2460         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2461                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2462                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2463                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2464         }
2465
2466         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2467                 /* adjust output voltage */
2468                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2469         }
2470
2471         tg3_phy_toggle_automdix(tp, 1);
2472         tg3_phy_set_wirespeed(tp);
2473         return 0;
2474 }
2475
2476 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2477 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2478 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2479                                           TG3_GPIO_MSG_NEED_VAUX)
2480 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2481         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2482          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2483          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2484          (TG3_GPIO_MSG_DRVR_PRES << 12))
2485
2486 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2487         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2488          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2489          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2490          (TG3_GPIO_MSG_NEED_VAUX << 12))
2491
2492 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2493 {
2494         u32 status, shift;
2495
2496         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2497             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2498                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2499         else
2500                 status = tr32(TG3_CPMU_DRV_STATUS);
2501
2502         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2503         status &= ~(TG3_GPIO_MSG_MASK << shift);
2504         status |= (newstat << shift);
2505
2506         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2507             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2508                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2509         else
2510                 tw32(TG3_CPMU_DRV_STATUS, status);
2511
2512         return status >> TG3_APE_GPIO_MSG_SHIFT;
2513 }
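/* Worked example (editorial): the status word holds one 2-bit message
 * per PCI function, spaced 4 bits apart.  For pci_fn == 2 the shift is
 * TG3_APE_GPIO_MSG_SHIFT + 8, so posting TG3_GPIO_MSG_NEED_VAUX there
 * is status = (status & ~(0x3 << shift)) | (0x2 << shift).
 */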
2514
2515 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2516 {
2517         if (!tg3_flag(tp, IS_NIC))
2518                 return 0;
2519
2520         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2521             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2522             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2523                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2524                         return -EIO;
2525
2526                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2527
2528                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2529                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2530
2531                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2532         } else {
2533                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2534                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2535         }
2536
2537         return 0;
2538 }
2539
2540 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2541 {
2542         u32 grc_local_ctrl;
2543
2544         if (!tg3_flag(tp, IS_NIC) ||
2545             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2546             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2547                 return;
2548
2549         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2550
2551         tw32_wait_f(GRC_LOCAL_CTRL,
2552                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2553                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2554
2555         tw32_wait_f(GRC_LOCAL_CTRL,
2556                     grc_local_ctrl,
2557                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2558
2559         tw32_wait_f(GRC_LOCAL_CTRL,
2560                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2561                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2562 }
2563
2564 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2565 {
2566         if (!tg3_flag(tp, IS_NIC))
2567                 return;
2568
2569         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2570             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2571                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2572                             (GRC_LCLCTRL_GPIO_OE0 |
2573                              GRC_LCLCTRL_GPIO_OE1 |
2574                              GRC_LCLCTRL_GPIO_OE2 |
2575                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2576                              GRC_LCLCTRL_GPIO_OUTPUT1),
2577                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2578         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2579                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2580                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2581                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2582                                      GRC_LCLCTRL_GPIO_OE1 |
2583                                      GRC_LCLCTRL_GPIO_OE2 |
2584                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2585                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2586                                      tp->grc_local_ctrl;
2587                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2588                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2589
2590                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2591                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2592                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2593
2594                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2595                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2596                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2597         } else {
2598                 u32 no_gpio2;
2599                 u32 grc_local_ctrl = 0;
2600
2601                 /* Workaround to prevent drawing too much current. */
2602                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2603                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2604                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2605                                     grc_local_ctrl,
2606                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2607                 }
2608
2609                 /* On 5753 and variants, GPIO2 cannot be used. */
2610                 no_gpio2 = tp->nic_sram_data_cfg &
2611                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2612
2613                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2614                                   GRC_LCLCTRL_GPIO_OE1 |
2615                                   GRC_LCLCTRL_GPIO_OE2 |
2616                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2617                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2618                 if (no_gpio2) {
2619                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2620                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2621                 }
2622                 tw32_wait_f(GRC_LOCAL_CTRL,
2623                             tp->grc_local_ctrl | grc_local_ctrl,
2624                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2625
2626                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2627
2628                 tw32_wait_f(GRC_LOCAL_CTRL,
2629                             tp->grc_local_ctrl | grc_local_ctrl,
2630                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2631
2632                 if (!no_gpio2) {
2633                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2634                         tw32_wait_f(GRC_LOCAL_CTRL,
2635                                     tp->grc_local_ctrl | grc_local_ctrl,
2636                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2637                 }
2638         }
2639 }
2640
2641 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2642 {
2643         u32 msg = 0;
2644
2645         /* Serialize power state transitions */
2646         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2647                 return;
2648
2649         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2650                 msg = TG3_GPIO_MSG_NEED_VAUX;
2651
2652         msg = tg3_set_function_status(tp, msg);
2653
2654         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2655                 goto done;
2656
2657         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2658                 tg3_pwrsrc_switch_to_vaux(tp);
2659         else
2660                 tg3_pwrsrc_die_with_vmain(tp);
2661
2662 done:
2663         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2664 }
2665
2666 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2667 {
2668         bool need_vaux = false;
2669
2670         /* The GPIOs do something completely different on 57765. */
2671         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2672                 return;
2673
2674         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2675             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2676             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2677                 tg3_frob_aux_power_5717(tp, include_wol ?
2678                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2679                 return;
2680         }
2681
2682         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2683                 struct net_device *dev_peer;
2684
2685                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2686
2687                 /* remove_one() may have been run on the peer. */
2688                 if (dev_peer) {
2689                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2690
2691                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2692                                 return;
2693
2694                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2695                             tg3_flag(tp_peer, ENABLE_ASF))
2696                                 need_vaux = true;
2697                 }
2698         }
2699
2700         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2701             tg3_flag(tp, ENABLE_ASF))
2702                 need_vaux = true;
2703
2704         if (need_vaux)
2705                 tg3_pwrsrc_switch_to_vaux(tp);
2706         else
2707                 tg3_pwrsrc_die_with_vmain(tp);
2708 }
2709
2710 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2711 {
2712         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2713                 return 1;
2714         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2715                 if (speed != SPEED_10)
2716                         return 1;
2717         } else if (speed == SPEED_10)
2718                 return 1;
2719
2720         return 0;
2721 }
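/* Decision table (editorial): the function returns 1 when
 * LED_CTRL_MODE_PHY_2 is in use; otherwise a BCM5411 PHY returns 1 at
 * every speed except 10 Mbps, while all other PHYs return 1 only at
 * 10 Mbps.
 */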
2722
2723 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2724 {
2725         u32 val;
2726
2727         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2728                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2729                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2730                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2731
2732                         sg_dig_ctrl |=
2733                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2734                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2735                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2736                 }
2737                 return;
2738         }
2739
2740         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2741                 tg3_bmcr_reset(tp);
2742                 val = tr32(GRC_MISC_CFG);
2743                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2744                 udelay(40);
2745                 return;
2746         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2747                 u32 phytest;
2748                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2749                         u32 phy;
2750
2751                         tg3_writephy(tp, MII_ADVERTISE, 0);
2752                         tg3_writephy(tp, MII_BMCR,
2753                                      BMCR_ANENABLE | BMCR_ANRESTART);
2754
2755                         tg3_writephy(tp, MII_TG3_FET_TEST,
2756                                      phytest | MII_TG3_FET_SHADOW_EN);
2757                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2758                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2759                                 tg3_writephy(tp,
2760                                              MII_TG3_FET_SHDW_AUXMODE4,
2761                                              phy);
2762                         }
2763                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2764                 }
2765                 return;
2766         } else if (do_low_power) {
2767                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2768                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2769
2770                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2771                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2772                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2773                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2774         }
2775
2776         /* The PHY should not be powered down on some chips because
2777          * of bugs.
2778          */
2779         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2780             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2781             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2782              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2783                 return;
2784
2785         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2786             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2787                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2788                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2789                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2790                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2791         }
2792
2793         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2794 }
2795
2796 /* tp->lock is held. */
2797 static int tg3_nvram_lock(struct tg3 *tp)
2798 {
2799         if (tg3_flag(tp, NVRAM)) {
2800                 int i;
2801
2802                 if (tp->nvram_lock_cnt == 0) {
2803                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2804                         for (i = 0; i < 8000; i++) {
2805                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2806                                         break;
2807                                 udelay(20);
2808                         }
2809                         if (i == 8000) {
2810                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2811                                 return -ENODEV;
2812                         }
2813                 }
2814                 tp->nvram_lock_cnt++;
2815         }
2816         return 0;
2817 }
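/* Timing note (editorial): the arbitration loop above polls for
 * SWARB_GNT1 up to 8000 * 20 us = 160 ms before backing the request
 * out and returning -ENODEV.  Nested lock calls are satisfied by the
 * nvram_lock_cnt reference count instead of re-arbitrating.
 */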
2818
2819 /* tp->lock is held. */
2820 static void tg3_nvram_unlock(struct tg3 *tp)
2821 {
2822         if (tg3_flag(tp, NVRAM)) {
2823                 if (tp->nvram_lock_cnt > 0)
2824                         tp->nvram_lock_cnt--;
2825                 if (tp->nvram_lock_cnt == 0)
2826                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2827         }
2828 }
2829
2830 /* tp->lock is held. */
2831 static void tg3_enable_nvram_access(struct tg3 *tp)
2832 {
2833         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2834                 u32 nvaccess = tr32(NVRAM_ACCESS);
2835
2836                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2837         }
2838 }
2839
2840 /* tp->lock is held. */
2841 static void tg3_disable_nvram_access(struct tg3 *tp)
2842 {
2843         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2844                 u32 nvaccess = tr32(NVRAM_ACCESS);
2845
2846                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2847         }
2848 }
2849
2850 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2851                                         u32 offset, u32 *val)
2852 {
2853         u32 tmp;
2854         int i;
2855
2856         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2857                 return -EINVAL;
2858
2859         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2860                                         EEPROM_ADDR_DEVID_MASK |
2861                                         EEPROM_ADDR_READ);
2862         tw32(GRC_EEPROM_ADDR,
2863              tmp |
2864              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2865              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2866               EEPROM_ADDR_ADDR_MASK) |
2867              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2868
2869         for (i = 0; i < 1000; i++) {
2870                 tmp = tr32(GRC_EEPROM_ADDR);
2871
2872                 if (tmp & EEPROM_ADDR_COMPLETE)
2873                         break;
2874                 msleep(1);
2875         }
2876         if (!(tmp & EEPROM_ADDR_COMPLETE))
2877                 return -EBUSY;
2878
2879         tmp = tr32(GRC_EEPROM_DATA);
2880
2881         /*
2882          * The data will always be opposite the native endian
2883          * format.  Perform a blind byteswap to compensate.
2884          */
2885         *val = swab32(tmp);
2886
2887         return 0;
2888 }
2889
2890 #define NVRAM_CMD_TIMEOUT 10000
2891
2892 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2893 {
2894         int i;
2895
2896         tw32(NVRAM_CMD, nvram_cmd);
2897         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2898                 udelay(10);
2899                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2900                         udelay(10);
2901                         break;
2902                 }
2903         }
2904
2905         if (i == NVRAM_CMD_TIMEOUT)
2906                 return -EBUSY;
2907
2908         return 0;
2909 }
2910
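     /* Convert a linear NVRAM byte offset into the page/byte-offset
      * address format used by Atmel AT45DB0X1B-style flash, whose page
      * size is not a power of two.  For example, with a 264-byte page,
      * offset 1000 becomes page 3 (1000 / 264) at byte 208 (1000 % 264).
      */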
2911 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2912 {
2913         if (tg3_flag(tp, NVRAM) &&
2914             tg3_flag(tp, NVRAM_BUFFERED) &&
2915             tg3_flag(tp, FLASH) &&
2916             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2917             (tp->nvram_jedecnum == JEDEC_ATMEL))
2918
2919                 addr = ((addr / tp->nvram_pagesize) <<
2920                         ATMEL_AT45DB0X1B_PAGE_POS) +
2921                        (addr % tp->nvram_pagesize);
2922
2923         return addr;
2924 }
2925
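     /* The inverse of tg3_nvram_phys_addr(): map a page/byte-offset
      * address back to a linear NVRAM byte offset.
      */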
2926 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2927 {
2928         if (tg3_flag(tp, NVRAM) &&
2929             tg3_flag(tp, NVRAM_BUFFERED) &&
2930             tg3_flag(tp, FLASH) &&
2931             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2932             (tp->nvram_jedecnum == JEDEC_ATMEL))
2933
2934                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2935                         tp->nvram_pagesize) +
2936                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2937
2938         return addr;
2939 }
2940
2941 /* NOTE: Data read in from NVRAM is byteswapped according to
2942  * the byteswapping settings for all other register accesses.
2943  * tg3 devices are BE devices, so on a BE machine, the data
2944  * returned will be exactly as it is seen in NVRAM.  On a LE
2945  * machine, the 32-bit value will be byteswapped.
2946  */
2947 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2948 {
2949         int ret;
2950
2951         if (!tg3_flag(tp, NVRAM))
2952                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2953
2954         offset = tg3_nvram_phys_addr(tp, offset);
2955
2956         if (offset > NVRAM_ADDR_MSK)
2957                 return -EINVAL;
2958
2959         ret = tg3_nvram_lock(tp);
2960         if (ret)
2961                 return ret;
2962
2963         tg3_enable_nvram_access(tp);
2964
2965         tw32(NVRAM_ADDR, offset);
2966         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2967                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2968
2969         if (ret == 0)
2970                 *val = tr32(NVRAM_RDDATA);
2971
2972         tg3_disable_nvram_access(tp);
2973
2974         tg3_nvram_unlock(tp);
2975
2976         return ret;
2977 }
2978
2979 /* Ensures NVRAM data is in bytestream format. */
2980 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2981 {
2982         u32 v;
2983         int res = tg3_nvram_read(tp, offset, &v);
2984         if (!res)
2985                 *val = cpu_to_be32(v);
2986         return res;
2987 }
2988
2989 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2990                                     u32 offset, u32 len, u8 *buf)
2991 {
2992         int i, j, rc = 0;
2993         u32 val;
2994
2995         for (i = 0; i < len; i += 4) {
2996                 u32 addr;
2997                 __be32 data;
2998
2999                 addr = offset + i;
3000
3001                 memcpy(&data, buf + i, 4);
3002
3003                 /*
3004                  * The SEEPROM interface expects the data to always be in the
3005                  * opposite of the native endian format.  We accomplish this by
3006                  * reversing all the operations that would have been performed
3007                  * on the data by a call to tg3_nvram_read_be32().
3008                  */
3009                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3010
3011                 val = tr32(GRC_EEPROM_ADDR);
3012                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3013
3014                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3015                         EEPROM_ADDR_READ);
3016                 tw32(GRC_EEPROM_ADDR, val |
3017                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3018                         (addr & EEPROM_ADDR_ADDR_MASK) |
3019                         EEPROM_ADDR_START |
3020                         EEPROM_ADDR_WRITE);
3021
3022                 for (j = 0; j < 1000; j++) {
3023                         val = tr32(GRC_EEPROM_ADDR);
3024
3025                         if (val & EEPROM_ADDR_COMPLETE)
3026                                 break;
3027                         msleep(1);
3028                 }
3029                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3030                         rc = -EBUSY;
3031                         break;
3032                 }
3033         }
3034
3035         return rc;
3036 }
3037
3038 /* offset and length are dword aligned */
3039 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3040                 u8 *buf)
3041 {
3042         int ret = 0;
3043         u32 pagesize = tp->nvram_pagesize;
3044         u32 pagemask = pagesize - 1;
3045         u32 nvram_cmd;
3046         u8 *tmp;
3047
3048         tmp = kmalloc(pagesize, GFP_KERNEL);
3049         if (tmp == NULL)
3050                 return -ENOMEM;
3051
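             /* Unbuffered flash is erased a page at a time, so do a
              * read-modify-write cycle: read the whole page, merge in the
              * new data, erase the page, then program it back word by word.
              */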
3052         while (len) {
3053                 int j;
3054                 u32 phy_addr, page_off, size;
3055
3056                 phy_addr = offset & ~pagemask;
3057
3058                 for (j = 0; j < pagesize; j += 4) {
3059                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3060                                                   (__be32 *) (tmp + j));
3061                         if (ret)
3062                                 break;
3063                 }
3064                 if (ret)
3065                         break;
3066
3067                 page_off = offset & pagemask;
3068                 size = pagesize;
3069                 if (len < size)
3070                         size = len;
3071
3072                 len -= size;
3073
3074                 memcpy(tmp + page_off, buf, size);
3075
3076                 offset = offset + (pagesize - page_off);
3077
3078                 tg3_enable_nvram_access(tp);
3079
3080                 /*
3081                  * Before we can erase the flash page, we need
3082                  * to issue a special "write enable" command.
3083                  */
3084                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3085
3086                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3087                 if (ret)
                             break;
3088
3089                 /* Erase the target page */
3090                 tw32(NVRAM_ADDR, phy_addr);
3091
3092                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3093                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3094
3095                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3096                 if (ret)
                             break;
3097
3098                 /* Issue another write enable to start the write. */
3099                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3100
3101                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3102                 if (ret)
                             break;
3103
3104                 for (j = 0; j < pagesize; j += 4) {
3105                         __be32 data;
3106
3107                         data = *((__be32 *) (tmp + j));
3108
3109                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3110
3111                         tw32(NVRAM_ADDR, phy_addr + j);
3112
3113                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3114                                 NVRAM_CMD_WR;
3115
3116                         if (j == 0)
3117                                 nvram_cmd |= NVRAM_CMD_FIRST;
3118                         else if (j == (pagesize - 4))
3119                                 nvram_cmd |= NVRAM_CMD_LAST;
3120
3121                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3122                         if (ret)
3123                                 break;
3124                 }
3125                 if (ret)
3126                         break;
3127         }
3128
3129         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3130         tg3_nvram_exec_cmd(tp, nvram_cmd);
3131
3132         kfree(tmp);
3133
3134         return ret;
3135 }
3136
3137 /* offset and length are dword aligned */
3138 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3139                 u8 *buf)
3140 {
3141         int i, ret = 0;
3142
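             /* Buffered parts are programmed one word at a time; frame each
              * page with NVRAM_CMD_FIRST/NVRAM_CMD_LAST at page boundaries.
              */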
3143         for (i = 0; i < len; i += 4, offset += 4) {
3144                 u32 page_off, phy_addr, nvram_cmd;
3145                 __be32 data;
3146
3147                 memcpy(&data, buf + i, 4);
3148                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3149
3150                 page_off = offset % tp->nvram_pagesize;
3151
3152                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3153
3154                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3155
3156                 if (page_off == 0 || i == 0)
3157                         nvram_cmd |= NVRAM_CMD_FIRST;
3158                 if (page_off == (tp->nvram_pagesize - 4))
3159                         nvram_cmd |= NVRAM_CMD_LAST;
3160
3161                 if (i == (len - 4))
3162                         nvram_cmd |= NVRAM_CMD_LAST;
3163
3164                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3165                     !tg3_flag(tp, FLASH) ||
3166                     !tg3_flag(tp, 57765_PLUS))
3167                         tw32(NVRAM_ADDR, phy_addr);
3168
3169                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3170                     !tg3_flag(tp, 5755_PLUS) &&
3171                     (tp->nvram_jedecnum == JEDEC_ST) &&
3172                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3173                         u32 cmd;
3174
3175                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3176                         ret = tg3_nvram_exec_cmd(tp, cmd);
3177                         if (ret)
3178                                 break;
3179                 }
3180                 if (!tg3_flag(tp, FLASH)) {
3181                         /* We always do complete word writes to eeprom. */
3182                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3183                 }
3184
3185                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3186                 if (ret)
3187                         break;
3188         }
3189         return ret;
3190 }
3191
3192 /* offset and length are dword aligned */
3193 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3194 {
3195         int ret;
3196
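             /* If writes are protected via GPIO1, deassert the write-protect
              * line for the duration of the update; it is restored below.
              */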
3197         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3198                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3199                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3200                 udelay(40);
3201         }
3202
3203         if (!tg3_flag(tp, NVRAM)) {
3204                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3205         } else {
3206                 u32 grc_mode;
3207
3208                 ret = tg3_nvram_lock(tp);
3209                 if (ret)
3210                         return ret;
3211
3212                 tg3_enable_nvram_access(tp);
3213                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3214                         tw32(NVRAM_WRITE1, 0x406);
3215
3216                 grc_mode = tr32(GRC_MODE);
3217                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3218
3219                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3220                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3221                                 buf);
3222                 } else {
3223                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3224                                 buf);
3225                 }
3226
3227                 grc_mode = tr32(GRC_MODE);
3228                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3229
3230                 tg3_disable_nvram_access(tp);
3231                 tg3_nvram_unlock(tp);
3232         }
3233
3234         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3235                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3236                 udelay(40);
3237         }
3238
3239         return ret;
3240 }
3241
3242 #define RX_CPU_SCRATCH_BASE     0x30000
3243 #define RX_CPU_SCRATCH_SIZE     0x04000
3244 #define TX_CPU_SCRATCH_BASE     0x34000
3245 #define TX_CPU_SCRATCH_SIZE     0x04000
3246
3247 /* tp->lock is held. */
3248 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3249 {
3250         int i;
3251
3252         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3253
3254         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3255                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3256
3257                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3258                 return 0;
3259         }
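             /* The halt request may not latch on the first attempt, so keep
              * re-issuing it until the CPU reports the halted state.
              */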
3260         if (offset == RX_CPU_BASE) {
3261                 for (i = 0; i < 10000; i++) {
3262                         tw32(offset + CPU_STATE, 0xffffffff);
3263                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3264                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3265                                 break;
3266                 }
3267
3268                 tw32(offset + CPU_STATE, 0xffffffff);
3269                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3270                 udelay(10);
3271         } else {
3272                 for (i = 0; i < 10000; i++) {
3273                         tw32(offset + CPU_STATE, 0xffffffff);
3274                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3275                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3276                                 break;
3277                 }
3278         }
3279
3280         if (i >= 10000) {
3281                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3282                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3283                 return -ENODEV;
3284         }
3285
3286         /* Clear firmware's nvram arbitration. */
3287         if (tg3_flag(tp, NVRAM))
3288                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3289         return 0;
3290 }
3291
3292 struct fw_info {
3293         unsigned int fw_base;
3294         unsigned int fw_len;
3295         const __be32 *fw_data;
3296 };
3297
3298 /* tp->lock is held. */
3299 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3300                                  u32 cpu_scratch_base, int cpu_scratch_size,
3301                                  struct fw_info *info)
3302 {
3303         int err, lock_err, i;
3304         void (*write_op)(struct tg3 *, u32, u32);
3305
3306         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3307                 netdev_err(tp->dev,
3308                            "%s: trying to load TX cpu firmware on a 5705-class chip\n",
3309                            __func__);
3310                 return -EINVAL;
3311         }
3312
3313         if (tg3_flag(tp, 5705_PLUS))
3314                 write_op = tg3_write_mem;
3315         else
3316                 write_op = tg3_write_indirect_reg32;
3317
3318         /* It is possible that bootcode is still loading at this point.
3319          * Acquire the nvram lock before halting the cpu.
3320          */
3321         lock_err = tg3_nvram_lock(tp);
3322         err = tg3_halt_cpu(tp, cpu_base);
3323         if (!lock_err)
3324                 tg3_nvram_unlock(tp);
3325         if (err)
3326                 goto out;
3327
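             /* Zero the CPU scratch area, keep the CPU halted, then copy in
              * the big-endian firmware image one 32-bit word at a time.
              */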
3328         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3329                 write_op(tp, cpu_scratch_base + i, 0);
3330         tw32(cpu_base + CPU_STATE, 0xffffffff);
3331         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3332         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3333                 write_op(tp, (cpu_scratch_base +
3334                               (info->fw_base & 0xffff) +
3335                               (i * sizeof(u32))),
3336                               be32_to_cpu(info->fw_data[i]));
3337
3338         err = 0;
3339
3340 out:
3341         return err;
3342 }
3343
3344 /* tp->lock is held. */
3345 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3346 {
3347         struct fw_info info;
3348         const __be32 *fw_data;
3349         int err, i;
3350
3351         fw_data = (void *)tp->fw->data;
3352
3353         /* The firmware blob starts with version numbers, followed by
3354            the start address and length.  We use the complete length:
3355            length = end_address_of_bss - start_address_of_text.
3356            The remainder is the image, loaded contiguously from the
3357            start address. */
3358
3359         info.fw_base = be32_to_cpu(fw_data[1]);
3360         info.fw_len = tp->fw->size - 12;
3361         info.fw_data = &fw_data[3];
3362
3363         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3364                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3365                                     &info);
3366         if (err)
3367                 return err;
3368
3369         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3370                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3371                                     &info);
3372         if (err)
3373                 return err;
3374
3375         /* Now startup only the RX cpu. */
3376         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3377         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3378
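             /* The PC write may not take on the first try; re-halt the CPU
              * and rewrite it, reading the PC back to verify (up to five
              * attempts).
              */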
3379         for (i = 0; i < 5; i++) {
3380                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3381                         break;
3382                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3383                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3384                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3385                 udelay(1000);
3386         }
3387         if (i >= 5) {
3388                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3389                            "should be %08x\n", __func__,
3390                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3391                 return -ENODEV;
3392         }
3393         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3394         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3395
3396         return 0;
3397 }
3398
3399 /* tp->lock is held. */
3400 static int tg3_load_tso_firmware(struct tg3 *tp)
3401 {
3402         struct fw_info info;
3403         const __be32 *fw_data;
3404         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3405         int err, i;
3406
3407         if (tg3_flag(tp, HW_TSO_1) ||
3408             tg3_flag(tp, HW_TSO_2) ||
3409             tg3_flag(tp, HW_TSO_3))
3410                 return 0;
3411
3412         fw_data = (void *)tp->fw->data;
3413
3414         /* The firmware blob starts with version numbers, followed by
3415            the start address and length.  We use the complete length:
3416            length = end_address_of_bss - start_address_of_text.
3417            The remainder is the image, loaded contiguously from the
3418            start address. */
3419
3420         info.fw_base = be32_to_cpu(fw_data[1]);
3421         cpu_scratch_size = tp->fw_len;
3422         info.fw_len = tp->fw->size - 12;
3423         info.fw_data = &fw_data[3];
3424
3425         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3426                 cpu_base = RX_CPU_BASE;
3427                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3428         } else {
3429                 cpu_base = TX_CPU_BASE;
3430                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3431                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3432         }
3433
3434         err = tg3_load_firmware_cpu(tp, cpu_base,
3435                                     cpu_scratch_base, cpu_scratch_size,
3436                                     &info);
3437         if (err)
3438                 return err;
3439
3440         /* Now startup the cpu. */
3441         tw32(cpu_base + CPU_STATE, 0xffffffff);
3442         tw32_f(cpu_base + CPU_PC, info.fw_base);
3443
3444         for (i = 0; i < 5; i++) {
3445                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3446                         break;
3447                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3448                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3449                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3450                 udelay(1000);
3451         }
3452         if (i >= 5) {
3453                 netdev_err(tp->dev,
3454                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3455                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3456                 return -ENODEV;
3457         }
3458         tw32(cpu_base + CPU_STATE, 0xffffffff);
3459         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3460         return 0;
3461 }
3462
3463
3464 /* tp->lock is held. */
3465 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3466 {
3467         u32 addr_high, addr_low;
3468         int i;
3469
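             /* Pack the 6-byte MAC address into the high (2 bytes) and low
              * (4 bytes) register halves and program all four address slots.
              */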
3470         addr_high = ((tp->dev->dev_addr[0] << 8) |
3471                      tp->dev->dev_addr[1]);
3472         addr_low = ((tp->dev->dev_addr[2] << 24) |
3473                     (tp->dev->dev_addr[3] << 16) |
3474                     (tp->dev->dev_addr[4] <<  8) |
3475                     (tp->dev->dev_addr[5] <<  0));
3476         for (i = 0; i < 4; i++) {
3477                 if (i == 1 && skip_mac_1)
3478                         continue;
3479                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3480                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3481         }
3482
3483         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3484             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3485                 for (i = 0; i < 12; i++) {
3486                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3487                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3488                 }
3489         }
3490
3491         addr_high = (tp->dev->dev_addr[0] +
3492                      tp->dev->dev_addr[1] +
3493                      tp->dev->dev_addr[2] +
3494                      tp->dev->dev_addr[3] +
3495                      tp->dev->dev_addr[4] +
3496                      tp->dev->dev_addr[5]) &
3497                 TX_BACKOFF_SEED_MASK;
3498         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3499 }
3500
3501 static void tg3_enable_register_access(struct tg3 *tp)
3502 {
3503         /*
3504          * Make sure register accesses (indirect or otherwise) will function
3505          * correctly.
3506          */
3507         pci_write_config_dword(tp->pdev,
3508                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3509 }
3510
3511 static int tg3_power_up(struct tg3 *tp)
3512 {
3513         int err;
3514
3515         tg3_enable_register_access(tp);
3516
3517         err = pci_set_power_state(tp->pdev, PCI_D0);
3518         if (!err) {
3519                 /* Switch out of Vaux if it is a NIC */
3520                 tg3_pwrsrc_switch_to_vmain(tp);
3521         } else {
3522                 netdev_err(tp->dev, "Transition to D0 failed\n");
3523         }
3524
3525         return err;
3526 }
3527
3528 static int tg3_setup_phy(struct tg3 *, int);
3529
3530 static int tg3_power_down_prepare(struct tg3 *tp)
3531 {
3532         u32 misc_host_ctrl;
3533         bool device_should_wake, do_low_power;
3534
3535         tg3_enable_register_access(tp);
3536
3537         /* Restore the CLKREQ setting. */
3538         if (tg3_flag(tp, CLKREQ_BUG)) {
3539                 u16 lnkctl;
3540
3541                 pci_read_config_word(tp->pdev,
3542                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3543                                      &lnkctl);
3544                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3545                 pci_write_config_word(tp->pdev,
3546                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3547                                       lnkctl);
3548         }
3549
3550         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3551         tw32(TG3PCI_MISC_HOST_CTRL,
3552              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3553
3554         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3555                              tg3_flag(tp, WOL_ENABLE);
3556
3557         if (tg3_flag(tp, USE_PHYLIB)) {
3558                 do_low_power = false;
3559                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3560                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3561                         struct phy_device *phydev;
3562                         u32 phyid, advertising;
3563
3564                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3565
3566                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3567
3568                         tp->link_config.speed = phydev->speed;
3569                         tp->link_config.duplex = phydev->duplex;
3570                         tp->link_config.autoneg = phydev->autoneg;
3571                         tp->link_config.advertising = phydev->advertising;
3572
3573                         advertising = ADVERTISED_TP |
3574                                       ADVERTISED_Pause |
3575                                       ADVERTISED_Autoneg |
3576                                       ADVERTISED_10baseT_Half;
3577
3578                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3579                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3580                                         advertising |=
3581                                                 ADVERTISED_100baseT_Half |
3582                                                 ADVERTISED_100baseT_Full |
3583                                                 ADVERTISED_10baseT_Full;
3584                                 else
3585                                         advertising |= ADVERTISED_10baseT_Full;
3586                         }
3587
3588                         phydev->advertising = advertising;
3589
3590                         phy_start_aneg(phydev);
3591
3592                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3593                         if (phyid != PHY_ID_BCMAC131) {
3594                                 phyid &= PHY_BCM_OUI_MASK;
3595                                 if (phyid == PHY_BCM_OUI_1 ||
3596                                     phyid == PHY_BCM_OUI_2 ||
3597                                     phyid == PHY_BCM_OUI_3)
3598                                         do_low_power = true;
3599                         }
3600                 }
3601         } else {
3602                 do_low_power = true;
3603
3604                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3605                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3606
3607                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3608                         tg3_setup_phy(tp, 0);
3609         }
3610
3611         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3612                 u32 val;
3613
3614                 val = tr32(GRC_VCPU_EXT_CTRL);
3615                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3616         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3617                 int i;
3618                 u32 val;
3619
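                     /* Give the bootcode up to ~200 ms to post its completion
                      * magic before continuing with the power-down.
                      */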
3620                 for (i = 0; i < 200; i++) {
3621                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3622                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3623                                 break;
3624                         msleep(1);
3625                 }
3626         }
3627         if (tg3_flag(tp, WOL_CAP))
3628                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3629                                                      WOL_DRV_STATE_SHUTDOWN |
3630                                                      WOL_DRV_WOL |
3631                                                      WOL_SET_MAGIC_PKT);
3632
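             /* Configure a minimal MAC mode that can still receive and
              * recognize the WoL magic packet while the device sleeps.
              */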
3633         if (device_should_wake) {
3634                 u32 mac_mode;
3635
3636                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3637                         if (do_low_power &&
3638                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3639                                 tg3_phy_auxctl_write(tp,
3640                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3641                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3642                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3643                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3644                                 udelay(40);
3645                         }
3646
3647                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3648                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3649                         else
3650                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3651
3652                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3653                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3654                             ASIC_REV_5700) {
3655                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3656                                              SPEED_100 : SPEED_10;
3657                                 if (tg3_5700_link_polarity(tp, speed))
3658                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3659                                 else
3660                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3661                         }
3662                 } else {
3663                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3664                 }
3665
3666                 if (!tg3_flag(tp, 5750_PLUS))
3667                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3668
3669                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3670                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3671                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3672                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3673
3674                 if (tg3_flag(tp, ENABLE_APE))
3675                         mac_mode |= MAC_MODE_APE_TX_EN |
3676                                     MAC_MODE_APE_RX_EN |
3677                                     MAC_MODE_TDE_ENABLE;
3678
3679                 tw32_f(MAC_MODE, mac_mode);
3680                 udelay(100);
3681
3682                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3683                 udelay(10);
3684         }
3685
3686         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3687             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3688              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3689                 u32 base_val;
3690
3691                 base_val = tp->pci_clock_ctrl;
3692                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3693                              CLOCK_CTRL_TXCLK_DISABLE);
3694
3695                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3696                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3697         } else if (tg3_flag(tp, 5780_CLASS) ||
3698                    tg3_flag(tp, CPMU_PRESENT) ||
3699                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3700                 /* do nothing */
3701         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3702                 u32 newbits1, newbits2;
3703
3704                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3705                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3706                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3707                                     CLOCK_CTRL_TXCLK_DISABLE |
3708                                     CLOCK_CTRL_ALTCLK);
3709                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3710                 } else if (tg3_flag(tp, 5705_PLUS)) {
3711                         newbits1 = CLOCK_CTRL_625_CORE;
3712                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3713                 } else {
3714                         newbits1 = CLOCK_CTRL_ALTCLK;
3715                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3716                 }
3717
3718                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3719                             40);
3720
3721                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3722                             40);
3723
3724                 if (!tg3_flag(tp, 5705_PLUS)) {
3725                         u32 newbits3;
3726
3727                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3728                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3729                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3730                                             CLOCK_CTRL_TXCLK_DISABLE |
3731                                             CLOCK_CTRL_44MHZ_CORE);
3732                         } else {
3733                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3734                         }
3735
3736                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3737                                     tp->pci_clock_ctrl | newbits3, 40);
3738                 }
3739         }
3740
3741         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3742                 tg3_power_down_phy(tp, do_low_power);
3743
3744         tg3_frob_aux_power(tp, true);
3745
3746         /* Workaround for unstable PLL clock */
3747         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3748             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3749                 u32 val = tr32(0x7d00);
3750
3751                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3752                 tw32(0x7d00, val);
3753                 if (!tg3_flag(tp, ENABLE_ASF)) {
3754                         int err;
3755
3756                         err = tg3_nvram_lock(tp);
3757                         tg3_halt_cpu(tp, RX_CPU_BASE);
3758                         if (!err)
3759                                 tg3_nvram_unlock(tp);
3760                 }
3761         }
3762
3763         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3764
3765         return 0;
3766 }
3767
3768 static void tg3_power_down(struct tg3 *tp)
3769 {
3770         tg3_power_down_prepare(tp);
3771
3772         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3773         pci_set_power_state(tp->pdev, PCI_D3hot);
3774 }
3775
3776 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3777 {
3778         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3779         case MII_TG3_AUX_STAT_10HALF:
3780                 *speed = SPEED_10;
3781                 *duplex = DUPLEX_HALF;
3782                 break;
3783
3784         case MII_TG3_AUX_STAT_10FULL:
3785                 *speed = SPEED_10;
3786                 *duplex = DUPLEX_FULL;
3787                 break;
3788
3789         case MII_TG3_AUX_STAT_100HALF:
3790                 *speed = SPEED_100;
3791                 *duplex = DUPLEX_HALF;
3792                 break;
3793
3794         case MII_TG3_AUX_STAT_100FULL:
3795                 *speed = SPEED_100;
3796                 *duplex = DUPLEX_FULL;
3797                 break;
3798
3799         case MII_TG3_AUX_STAT_1000HALF:
3800                 *speed = SPEED_1000;
3801                 *duplex = DUPLEX_HALF;
3802                 break;
3803
3804         case MII_TG3_AUX_STAT_1000FULL:
3805                 *speed = SPEED_1000;
3806                 *duplex = DUPLEX_FULL;
3807                 break;
3808
3809         default:
3810                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3811                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3812                                  SPEED_10;
3813                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3814                                   DUPLEX_HALF;
3815                         break;
3816                 }
3817                 *speed = SPEED_UNKNOWN;
3818                 *duplex = DUPLEX_UNKNOWN;
3819                 break;
3820         }
3821 }
3822
3823 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3824 {
3825         int err = 0;
3826         u32 val, new_adv;
3827
3828         new_adv = ADVERTISE_CSMA;
3829         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3830         new_adv |= mii_advertise_flowctrl(flowctrl);
3831
3832         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3833         if (err)
3834                 goto done;
3835
3836         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3837                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3838
3839                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3840                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3841                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3842
3843                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3844                 if (err)
3845                         goto done;
3846         }
3847
3848         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3849                 goto done;
3850
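             /* Stop requesting LPI while the EEE advertisement is updated. */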
3851         tw32(TG3_CPMU_EEE_MODE,
3852              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3853
3854         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3855         if (!err) {
3856                 u32 err2;
3857
3858                 val = 0;
3859                 /* Advertise 100-BaseTX EEE ability */
3860                 if (advertise & ADVERTISED_100baseT_Full)
3861                         val |= MDIO_AN_EEE_ADV_100TX;
3862                 /* Advertise 1000-BaseT EEE ability */
3863                 if (advertise & ADVERTISED_1000baseT_Full)
3864                         val |= MDIO_AN_EEE_ADV_1000T;
3865                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3866                 if (err)
3867                         val = 0;
3868
3869                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3870                 case ASIC_REV_5717:
3871                 case ASIC_REV_57765:
3872                 case ASIC_REV_57766:
3873                 case ASIC_REV_5719:
3874                         /* If any EEE modes were advertised above... */
3875                         if (val)
3876                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3877                                       MII_TG3_DSP_TAP26_RMRXSTO |
3878                                       MII_TG3_DSP_TAP26_OPCSINPT;
3879                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3880                         /* Fall through */
3881                 case ASIC_REV_5720:
3882                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3883                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3884                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3885                 }
3886
3887                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3888                 if (!err)
3889                         err = err2;
3890         }
3891
3892 done:
3893         return err;
3894 }
3895
3896 static void tg3_phy_copper_begin(struct tg3 *tp)
3897 {
3898         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3899             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3900                 u32 adv, fc;
3901
3902                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3903                         adv = ADVERTISED_10baseT_Half |
3904                               ADVERTISED_10baseT_Full;
3905                         if (tg3_flag(tp, WOL_SPEED_100MB))
3906                                 adv |= ADVERTISED_100baseT_Half |
3907                                        ADVERTISED_100baseT_Full;
3908
3909                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3910                 } else {
3911                         adv = tp->link_config.advertising;
3912                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3913                                 adv &= ~(ADVERTISED_1000baseT_Half |
3914                                          ADVERTISED_1000baseT_Full);
3915
3916                         fc = tp->link_config.flowctrl;
3917                 }
3918
3919                 tg3_phy_autoneg_cfg(tp, adv, fc);
3920
3921                 tg3_writephy(tp, MII_BMCR,
3922                              BMCR_ANENABLE | BMCR_ANRESTART);
3923         } else {
3924                 int i;
3925                 u32 bmcr, orig_bmcr;
3926
3927                 tp->link_config.active_speed = tp->link_config.speed;
3928                 tp->link_config.active_duplex = tp->link_config.duplex;
3929
3930                 bmcr = 0;
3931                 switch (tp->link_config.speed) {
3932                 default:
3933                 case SPEED_10:
3934                         break;
3935
3936                 case SPEED_100:
3937                         bmcr |= BMCR_SPEED100;
3938                         break;
3939
3940                 case SPEED_1000:
3941                         bmcr |= BMCR_SPEED1000;
3942                         break;
3943                 }
3944
3945                 if (tp->link_config.duplex == DUPLEX_FULL)
3946                         bmcr |= BMCR_FULLDPLX;
3947
3948                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3949                     (bmcr != orig_bmcr)) {
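                             /* Force the link down by entering loopback, wait
                              * for it to drop, then apply the new forced mode.
                              */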
3950                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3951                         for (i = 0; i < 1500; i++) {
3952                                 u32 tmp;
3953
3954                                 udelay(10);
3955                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3956                                     tg3_readphy(tp, MII_BMSR, &tmp))
3957                                         continue;
3958                                 if (!(tmp & BMSR_LSTATUS)) {
3959                                         udelay(40);
3960                                         break;
3961                                 }
3962                         }
3963                         tg3_writephy(tp, MII_BMCR, bmcr);
3964                         udelay(40);
3965                 }
3966         }
3967 }
3968
3969 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3970 {
3971         int err;
3972
3973         /* Turn off tap power management and set the
3974            extended packet length bit. */
3975         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3976
3977         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3978         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3979         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3980         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3981         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3982
3983         udelay(40);
3984
3985         return err;
3986 }
3987
3988 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3989 {
3990         u32 advmsk, tgtadv, advertising;
3991
3992         advertising = tp->link_config.advertising;
3993         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
3994
3995         advmsk = ADVERTISE_ALL;
3996         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3997                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
3998                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3999         }
4000
4001         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4002                 return false;
4003
4004         if ((*lcladv & advmsk) != tgtadv)
4005                 return false;
4006
4007         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4008                 u32 tg3_ctrl;
4009
4010                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4011
4012                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4013                         return false;
4014
4015                 if (tgtadv &&
4016                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4017                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4018                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4019                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4020                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4021                 } else {
4022                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4023                 }
4024
4025                 if (tg3_ctrl != tgtadv)
4026                         return false;
4027         }
4028
4029         return true;
4030 }
4031
4032 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4033 {
4034         u32 lpeth = 0;
4035
4036         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4037                 u32 val;
4038
4039                 if (tg3_readphy(tp, MII_STAT1000, &val))
4040                         return false;
4041
4042                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4043         }
4044
4045         if (tg3_readphy(tp, MII_LPA, rmtadv))
4046                 return false;
4047
4048         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4049         tp->link_config.rmt_adv = lpeth;
4050
4051         return true;
4052 }
4053
4054 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4055 {
4056         int current_link_up;
4057         u32 bmsr, val;
4058         u32 lcl_adv, rmt_adv;
4059         u16 current_speed;
4060         u8 current_duplex;
4061         int i, err;
4062
4063         tw32(MAC_EVENT, 0);
4064
4065         tw32_f(MAC_STATUS,
4066              (MAC_STATUS_SYNC_CHANGED |
4067               MAC_STATUS_CFG_CHANGED |
4068               MAC_STATUS_MI_COMPLETION |
4069               MAC_STATUS_LNKSTATE_CHANGED));
4070         udelay(40);
4071
4072         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4073                 tw32_f(MAC_MI_MODE,
4074                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4075                 udelay(80);
4076         }
4077
4078         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4079
4080         /* Some third-party PHYs need to be reset on link going
4081          * down.
4082          */
4083         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4084              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4085              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4086             netif_carrier_ok(tp->dev)) {
4087                 tg3_readphy(tp, MII_BMSR, &bmsr);
4088                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4089                     !(bmsr & BMSR_LSTATUS))
4090                         force_reset = 1;
4091         }
4092         if (force_reset)
4093                 tg3_phy_reset(tp);
4094
4095         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4096                 tg3_readphy(tp, MII_BMSR, &bmsr);
4097                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4098                     !tg3_flag(tp, INIT_COMPLETE))
4099                         bmsr = 0;
4100
4101                 if (!(bmsr & BMSR_LSTATUS)) {
4102                         err = tg3_init_5401phy_dsp(tp);
4103                         if (err)
4104                                 return err;
4105
4106                         tg3_readphy(tp, MII_BMSR, &bmsr);
4107                         for (i = 0; i < 1000; i++) {
4108                                 udelay(10);
4109                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4110                                     (bmsr & BMSR_LSTATUS)) {
4111                                         udelay(40);
4112                                         break;
4113                                 }
4114                         }
4115
4116                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4117                             TG3_PHY_REV_BCM5401_B0 &&
4118                             !(bmsr & BMSR_LSTATUS) &&
4119                             tp->link_config.active_speed == SPEED_1000) {
4120                                 err = tg3_phy_reset(tp);
4121                                 if (!err)
4122                                         err = tg3_init_5401phy_dsp(tp);
4123                                 if (err)
4124                                         return err;
4125                         }
4126                 }
4127         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4128                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4129                 /* 5701 {A0,B0} CRC bug workaround */
4130                 tg3_writephy(tp, 0x15, 0x0a75);
4131                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4132                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4133                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4134         }
4135
4136         /* Clear pending interrupts... */
4137         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4138         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4139
4140         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4141                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4142         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4143                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4144
4145         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4146             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4147                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4148                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4149                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4150                 else
4151                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4152         }
4153
4154         current_link_up = 0;
4155         current_speed = SPEED_UNKNOWN;
4156         current_duplex = DUPLEX_UNKNOWN;
4157         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4158         tp->link_config.rmt_adv = 0;
4159
4160         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4161                 err = tg3_phy_auxctl_read(tp,
4162                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4163                                           &val);
4164                 if (!err && !(val & (1 << 10))) {
4165                         tg3_phy_auxctl_write(tp,
4166                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4167                                              val | (1 << 10));
4168                         goto relink;
4169                 }
4170         }
4171
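             /* MII_BMSR latches link-down events; read it twice so that the
              * second read reflects the current link state.
              */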
4172         bmsr = 0;
4173         for (i = 0; i < 100; i++) {
4174                 tg3_readphy(tp, MII_BMSR, &bmsr);
4175                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4176                     (bmsr & BMSR_LSTATUS))
4177                         break;
4178                 udelay(40);
4179         }
4180
4181         if (bmsr & BMSR_LSTATUS) {
4182                 u32 aux_stat, bmcr;
4183
4184                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4185                 for (i = 0; i < 2000; i++) {
4186                         udelay(10);
4187                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4188                             aux_stat)
4189                                 break;
4190                 }
4191
4192                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4193                                              &current_speed,
4194                                              &current_duplex);
4195
4196                 bmcr = 0;
4197                 for (i = 0; i < 200; i++) {
4198                         tg3_readphy(tp, MII_BMCR, &bmcr);
4199                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4200                                 continue;
4201                         if (bmcr && bmcr != 0x7fff)
4202                                 break;
4203                         udelay(10);
4204                 }
4205
4206                 lcl_adv = 0;
4207                 rmt_adv = 0;
4208
4209                 tp->link_config.active_speed = current_speed;
4210                 tp->link_config.active_duplex = current_duplex;
4211
4212                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4213                         if ((bmcr & BMCR_ANENABLE) &&
4214                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4215                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4216                                 current_link_up = 1;
4217                 } else {
4218                         if (!(bmcr & BMCR_ANENABLE) &&
4219                             tp->link_config.speed == current_speed &&
4220                             tp->link_config.duplex == current_duplex &&
4221                             tp->link_config.flowctrl ==
4222                             tp->link_config.active_flowctrl) {
4223                                 current_link_up = 1;
4224                         }
4225                 }
4226
4227                 if (current_link_up == 1 &&
4228                     tp->link_config.active_duplex == DUPLEX_FULL) {
4229                         u32 reg, bit;
4230
4231                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4232                                 reg = MII_TG3_FET_GEN_STAT;
4233                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4234                         } else {
4235                                 reg = MII_TG3_EXT_STAT;
4236                                 bit = MII_TG3_EXT_STAT_MDIX;
4237                         }
4238
4239                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4240                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4241
4242                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4243                 }
4244         }
4245
4246 relink:
4247         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4248                 tg3_phy_copper_begin(tp);
4249
4250                 tg3_readphy(tp, MII_BMSR, &bmsr);
4251                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4252                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4253                         current_link_up = 1;
4254         }
4255
4256         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4257         if (current_link_up == 1) {
4258                 if (tp->link_config.active_speed == SPEED_100 ||
4259                     tp->link_config.active_speed == SPEED_10)
4260                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4261                 else
4262                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4263         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4264                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4265         else
4266                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4267
4268         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4269         if (tp->link_config.active_duplex == DUPLEX_HALF)
4270                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4271
4272         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4273                 if (current_link_up == 1 &&
4274                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4275                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4276                 else
4277                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4278         }
4279
4280         /* Without this setting the Netgear GA302T PHY does not
4281          * send or receive packets; the reason is not understood.
4282          */
4283         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4284             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4285                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4286                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4287                 udelay(80);
4288         }
4289
4290         tw32_f(MAC_MODE, tp->mac_mode);
4291         udelay(40);
4292
4293         tg3_phy_eee_adjust(tp, current_link_up);
4294
4295         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4296                 /* Link changes are polled via timer; no MAC event needed. */
4297                 tw32_f(MAC_EVENT, 0);
4298         } else {
4299                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4300         }
4301         udelay(40);
4302
4303         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4304             current_link_up == 1 &&
4305             tp->link_config.active_speed == SPEED_1000 &&
4306             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4307                 udelay(120);
4308                 tw32_f(MAC_STATUS,
4309                      (MAC_STATUS_SYNC_CHANGED |
4310                       MAC_STATUS_CFG_CHANGED));
4311                 udelay(40);
4312                 tg3_write_mem(tp,
4313                               NIC_SRAM_FIRMWARE_MBOX,
4314                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4315         }
4316
4317         /* Prevent send BD corruption: disable CLKREQ at 10/100 link speeds. */
4318         if (tg3_flag(tp, CLKREQ_BUG)) {
4319                 u16 oldlnkctl, newlnkctl;
4320
4321                 pci_read_config_word(tp->pdev,
4322                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4323                                      &oldlnkctl);
4324                 if (tp->link_config.active_speed == SPEED_100 ||
4325                     tp->link_config.active_speed == SPEED_10)
4326                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4327                 else
4328                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4329                 if (newlnkctl != oldlnkctl)
4330                         pci_write_config_word(tp->pdev,
4331                                               pci_pcie_cap(tp->pdev) +
4332                                               PCI_EXP_LNKCTL, newlnkctl);
4333         }
4334
4335         if (current_link_up != netif_carrier_ok(tp->dev)) {
4336                 if (current_link_up)
4337                         netif_carrier_on(tp->dev);
4338                 else
4339                         netif_carrier_off(tp->dev);
4340                 tg3_link_report(tp);
4341         }
4342
4343         return 0;
4344 }
4345
4346 struct tg3_fiber_aneginfo {
4347         int state;
4348 #define ANEG_STATE_UNKNOWN              0
4349 #define ANEG_STATE_AN_ENABLE            1
4350 #define ANEG_STATE_RESTART_INIT         2
4351 #define ANEG_STATE_RESTART              3
4352 #define ANEG_STATE_DISABLE_LINK_OK      4
4353 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4354 #define ANEG_STATE_ABILITY_DETECT       6
4355 #define ANEG_STATE_ACK_DETECT_INIT      7
4356 #define ANEG_STATE_ACK_DETECT           8
4357 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4358 #define ANEG_STATE_COMPLETE_ACK         10
4359 #define ANEG_STATE_IDLE_DETECT_INIT     11
4360 #define ANEG_STATE_IDLE_DETECT          12
4361 #define ANEG_STATE_LINK_OK              13
4362 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4363 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4364
4365         u32 flags;
4366 #define MR_AN_ENABLE            0x00000001
4367 #define MR_RESTART_AN           0x00000002
4368 #define MR_AN_COMPLETE          0x00000004
4369 #define MR_PAGE_RX              0x00000008
4370 #define MR_NP_LOADED            0x00000010
4371 #define MR_TOGGLE_TX            0x00000020
4372 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4373 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4374 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4375 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4376 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4377 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4378 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4379 #define MR_TOGGLE_RX            0x00002000
4380 #define MR_NP_RX                0x00004000
4381
4382 #define MR_LINK_OK              0x80000000
4383
4384         unsigned long link_time, cur_time;
4385
4386         u32 ability_match_cfg;
4387         int ability_match_count;
4388
4389         char ability_match, idle_match, ack_match;
4390
4391         u32 txconfig, rxconfig;
4392 #define ANEG_CFG_NP             0x00000080
4393 #define ANEG_CFG_ACK            0x00000040
4394 #define ANEG_CFG_RF2            0x00000020
4395 #define ANEG_CFG_RF1            0x00000010
4396 #define ANEG_CFG_PS2            0x00000001
4397 #define ANEG_CFG_PS1            0x00008000
4398 #define ANEG_CFG_HD             0x00004000
4399 #define ANEG_CFG_FD             0x00002000
4400 #define ANEG_CFG_INVAL          0x00001f06
4401
4402 };
4403 #define ANEG_OK         0
4404 #define ANEG_DONE       1
4405 #define ANEG_TIMER_ENAB 2
4406 #define ANEG_FAILED     -1
4407
4408 #define ANEG_STATE_SETTLE_TIME  10000
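/*
 * Timing note (derived from fiber_autoneg() below, which clocks this
 * machine with udelay(1) per tick): one ap->cur_time tick is roughly one
 * microsecond, so ANEG_STATE_SETTLE_TIME amounts to ~10 ms of settle
 * time inside a total budget of ~195 ms (195000 ticks) per attempt.
 */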
4409
4410 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4411                                    struct tg3_fiber_aneginfo *ap)
4412 {
4413         u16 flowctrl;
4414         unsigned long delta;
4415         u32 rx_cfg_reg;
4416         int ret;
4417
4418         if (ap->state == ANEG_STATE_UNKNOWN) {
4419                 ap->rxconfig = 0;
4420                 ap->link_time = 0;
4421                 ap->cur_time = 0;
4422                 ap->ability_match_cfg = 0;
4423                 ap->ability_match_count = 0;
4424                 ap->ability_match = 0;
4425                 ap->idle_match = 0;
4426                 ap->ack_match = 0;
4427         }
4428         ap->cur_time++;
4429
4430         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4431                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4432
4433                 if (rx_cfg_reg != ap->ability_match_cfg) {
4434                         ap->ability_match_cfg = rx_cfg_reg;
4435                         ap->ability_match = 0;
4436                         ap->ability_match_count = 0;
4437                 } else {
4438                         if (++ap->ability_match_count > 1) {
4439                                 ap->ability_match = 1;
4440                                 ap->ability_match_cfg = rx_cfg_reg;
4441                         }
4442                 }
4443                 if (rx_cfg_reg & ANEG_CFG_ACK)
4444                         ap->ack_match = 1;
4445                 else
4446                         ap->ack_match = 0;
4447
4448                 ap->idle_match = 0;
4449         } else {
4450                 ap->idle_match = 1;
4451                 ap->ability_match_cfg = 0;
4452                 ap->ability_match_count = 0;
4453                 ap->ability_match = 0;
4454                 ap->ack_match = 0;
4455
4456                 rx_cfg_reg = 0;
4457         }
4458
4459         ap->rxconfig = rx_cfg_reg;
4460         ret = ANEG_OK;
4461
4462         switch (ap->state) {
4463         case ANEG_STATE_UNKNOWN:
4464                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4465                         ap->state = ANEG_STATE_AN_ENABLE;
4466
4467                 /* fallthru */
4468         case ANEG_STATE_AN_ENABLE:
4469                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4470                 if (ap->flags & MR_AN_ENABLE) {
4471                         ap->link_time = 0;
4472                         ap->cur_time = 0;
4473                         ap->ability_match_cfg = 0;
4474                         ap->ability_match_count = 0;
4475                         ap->ability_match = 0;
4476                         ap->idle_match = 0;
4477                         ap->ack_match = 0;
4478
4479                         ap->state = ANEG_STATE_RESTART_INIT;
4480                 } else {
4481                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4482                 }
4483                 break;
4484
4485         case ANEG_STATE_RESTART_INIT:
4486                 ap->link_time = ap->cur_time;
4487                 ap->flags &= ~(MR_NP_LOADED);
4488                 ap->txconfig = 0;
4489                 tw32(MAC_TX_AUTO_NEG, 0);
4490                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4491                 tw32_f(MAC_MODE, tp->mac_mode);
4492                 udelay(40);
4493
4494                 ret = ANEG_TIMER_ENAB;
4495                 ap->state = ANEG_STATE_RESTART;
4496
4497                 /* fallthru */
4498         case ANEG_STATE_RESTART:
4499                 delta = ap->cur_time - ap->link_time;
4500                 if (delta > ANEG_STATE_SETTLE_TIME)
4501                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4502                 else
4503                         ret = ANEG_TIMER_ENAB;
4504                 break;
4505
4506         case ANEG_STATE_DISABLE_LINK_OK:
4507                 ret = ANEG_DONE;
4508                 break;
4509
4510         case ANEG_STATE_ABILITY_DETECT_INIT:
4511                 ap->flags &= ~(MR_TOGGLE_TX);
4512                 ap->txconfig = ANEG_CFG_FD;
4513                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4514                 if (flowctrl & ADVERTISE_1000XPAUSE)
4515                         ap->txconfig |= ANEG_CFG_PS1;
4516                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4517                         ap->txconfig |= ANEG_CFG_PS2;
4518                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4519                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4520                 tw32_f(MAC_MODE, tp->mac_mode);
4521                 udelay(40);
4522
4523                 ap->state = ANEG_STATE_ABILITY_DETECT;
4524                 break;
4525
4526         case ANEG_STATE_ABILITY_DETECT:
4527                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4528                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4529                 break;
4530
4531         case ANEG_STATE_ACK_DETECT_INIT:
4532                 ap->txconfig |= ANEG_CFG_ACK;
4533                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4534                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4535                 tw32_f(MAC_MODE, tp->mac_mode);
4536                 udelay(40);
4537
4538                 ap->state = ANEG_STATE_ACK_DETECT;
4539
4540                 /* fallthru */
4541         case ANEG_STATE_ACK_DETECT:
4542                 if (ap->ack_match != 0) {
4543                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4544                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4545                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4546                         } else {
4547                                 ap->state = ANEG_STATE_AN_ENABLE;
4548                         }
4549                 } else if (ap->ability_match != 0 &&
4550                            ap->rxconfig == 0) {
4551                         ap->state = ANEG_STATE_AN_ENABLE;
4552                 }
4553                 break;
4554
4555         case ANEG_STATE_COMPLETE_ACK_INIT:
4556                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4557                         ret = ANEG_FAILED;
4558                         break;
4559                 }
4560                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4561                                MR_LP_ADV_HALF_DUPLEX |
4562                                MR_LP_ADV_SYM_PAUSE |
4563                                MR_LP_ADV_ASYM_PAUSE |
4564                                MR_LP_ADV_REMOTE_FAULT1 |
4565                                MR_LP_ADV_REMOTE_FAULT2 |
4566                                MR_LP_ADV_NEXT_PAGE |
4567                                MR_TOGGLE_RX |
4568                                MR_NP_RX);
4569                 if (ap->rxconfig & ANEG_CFG_FD)
4570                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4571                 if (ap->rxconfig & ANEG_CFG_HD)
4572                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4573                 if (ap->rxconfig & ANEG_CFG_PS1)
4574                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4575                 if (ap->rxconfig & ANEG_CFG_PS2)
4576                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4577                 if (ap->rxconfig & ANEG_CFG_RF1)
4578                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4579                 if (ap->rxconfig & ANEG_CFG_RF2)
4580                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4581                 if (ap->rxconfig & ANEG_CFG_NP)
4582                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4583
4584                 ap->link_time = ap->cur_time;
4585
4586                 ap->flags ^= (MR_TOGGLE_TX);
4587                 if (ap->rxconfig & 0x0008)
4588                         ap->flags |= MR_TOGGLE_RX;
4589                 if (ap->rxconfig & ANEG_CFG_NP)
4590                         ap->flags |= MR_NP_RX;
4591                 ap->flags |= MR_PAGE_RX;
4592
4593                 ap->state = ANEG_STATE_COMPLETE_ACK;
4594                 ret = ANEG_TIMER_ENAB;
4595                 break;
4596
4597         case ANEG_STATE_COMPLETE_ACK:
4598                 if (ap->ability_match != 0 &&
4599                     ap->rxconfig == 0) {
4600                         ap->state = ANEG_STATE_AN_ENABLE;
4601                         break;
4602                 }
4603                 delta = ap->cur_time - ap->link_time;
4604                 if (delta > ANEG_STATE_SETTLE_TIME) {
4605                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4606                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4607                         } else {
4608                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4609                                     !(ap->flags & MR_NP_RX)) {
4610                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4611                                 } else {
4612                                         ret = ANEG_FAILED;
4613                                 }
4614                         }
4615                 }
4616                 break;
4617
4618         case ANEG_STATE_IDLE_DETECT_INIT:
4619                 ap->link_time = ap->cur_time;
4620                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4621                 tw32_f(MAC_MODE, tp->mac_mode);
4622                 udelay(40);
4623
4624                 ap->state = ANEG_STATE_IDLE_DETECT;
4625                 ret = ANEG_TIMER_ENAB;
4626                 break;
4627
4628         case ANEG_STATE_IDLE_DETECT:
4629                 if (ap->ability_match != 0 &&
4630                     ap->rxconfig == 0) {
4631                         ap->state = ANEG_STATE_AN_ENABLE;
4632                         break;
4633                 }
4634                 delta = ap->cur_time - ap->link_time;
4635                 if (delta > ANEG_STATE_SETTLE_TIME) {
4636                         /* XXX another gem from the Broadcom driver :( */
4637                         ap->state = ANEG_STATE_LINK_OK;
4638                 }
4639                 break;
4640
4641         case ANEG_STATE_LINK_OK:
4642                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4643                 ret = ANEG_DONE;
4644                 break;
4645
4646         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4647                 /* ??? unimplemented */
4648                 break;
4649
4650         case ANEG_STATE_NEXT_PAGE_WAIT:
4651                 /* ??? unimplemented */
4652                 break;
4653
4654         default:
4655                 ret = ANEG_FAILED;
4656                 break;
4657         }
4658
4659         return ret;
4660 }
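/*
 * Note for readers: ANEG_DONE and ANEG_FAILED are terminal; ANEG_OK and
 * ANEG_TIMER_ENAB both ask the caller to keep clocking the machine, and
 * fiber_autoneg() below does exactly that, exiting only on the two
 * terminal codes.
 */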
4661
4662 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4663 {
4664         int res = 0;
4665         struct tg3_fiber_aneginfo aninfo;
4666         int status = ANEG_FAILED;
4667         unsigned int tick;
4668         u32 tmp;
4669
4670         tw32_f(MAC_TX_AUTO_NEG, 0);
4671
4672         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4673         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4674         udelay(40);
4675
4676         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4677         udelay(40);
4678
4679         memset(&aninfo, 0, sizeof(aninfo));
4680         aninfo.flags |= MR_AN_ENABLE;
4681         aninfo.state = ANEG_STATE_UNKNOWN;
4682         aninfo.cur_time = 0;
4683         tick = 0;
4684         while (++tick < 195000) {
4685                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4686                 if (status == ANEG_DONE || status == ANEG_FAILED)
4687                         break;
4688
4689                 udelay(1);
4690         }
4691
4692         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4693         tw32_f(MAC_MODE, tp->mac_mode);
4694         udelay(40);
4695
4696         *txflags = aninfo.txconfig;
4697         *rxflags = aninfo.flags;
4698
4699         if (status == ANEG_DONE &&
4700             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4701                              MR_LP_ADV_FULL_DUPLEX)))
4702                 res = 1;
4703
4704         return res;
4705 }
4706
4707 static void tg3_init_bcm8002(struct tg3 *tp)
4708 {
4709         u32 mac_status = tr32(MAC_STATUS);
4710         int i;
4711
4712         /* Reset when initializing for the first time or when we have a link. */
4713         if (tg3_flag(tp, INIT_COMPLETE) &&
4714             !(mac_status & MAC_STATUS_PCS_SYNCED))
4715                 return;
4716
4717         /* Set PLL lock range. */
4718         tg3_writephy(tp, 0x16, 0x8007);
4719
4720         /* SW reset */
4721         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4722
4723         /* Wait for reset to complete. */
4724         /* XXX schedule_timeout() ... */
4725         for (i = 0; i < 500; i++)
4726                 udelay(10);
4727
4728         /* Config mode; select PMA/Ch 1 regs. */
4729         tg3_writephy(tp, 0x10, 0x8411);
4730
4731         /* Enable auto-lock and comdet, select txclk for tx. */
4732         tg3_writephy(tp, 0x11, 0x0a10);
4733
4734         tg3_writephy(tp, 0x18, 0x00a0);
4735         tg3_writephy(tp, 0x16, 0x41ff);
4736
4737         /* Assert and deassert POR. */
4738         tg3_writephy(tp, 0x13, 0x0400);
4739         udelay(40);
4740         tg3_writephy(tp, 0x13, 0x0000);
4741
4742         tg3_writephy(tp, 0x11, 0x0a50);
4743         udelay(40);
4744         tg3_writephy(tp, 0x11, 0x0a10);
4745
4746         /* Wait for signal to stabilize */
4747         /* XXX schedule_timeout() ... */
4748         for (i = 0; i < 15000; i++)
4749                 udelay(10);
4750
4751         /* Deselect the channel register so we can read the PHYID
4752          * later.
4753          */
4754         tg3_writephy(tp, 0x10, 0x8011);
4755 }
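/*
 * Sketch (assumption: this init path may sleep, which the XXX notes
 * above suggest but never verified): the two busy-wait loops, ~5 ms for
 * the SW reset and ~150 ms for signal stabilization, could then become
 *
 *	usleep_range(5000, 10000);
 *	msleep(150);
 *
 * instead of spinning in udelay(10).
 */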
4756
4757 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4758 {
4759         u16 flowctrl;
4760         u32 sg_dig_ctrl, sg_dig_status;
4761         u32 serdes_cfg, expected_sg_dig_ctrl;
4762         int workaround, port_a;
4763         int current_link_up;
4764
4765         serdes_cfg = 0;
4766         expected_sg_dig_ctrl = 0;
4767         workaround = 0;
4768         port_a = 1;
4769         current_link_up = 0;
4770
4771         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4772             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4773                 workaround = 1;
4774                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4775                         port_a = 0;
4776
4777                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4778                 /* preserve bits 20-23 for voltage regulator */
4779                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4780         }
4781
4782         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4783
4784         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4785                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4786                         if (workaround) {
4787                                 u32 val = serdes_cfg;
4788
4789                                 if (port_a)
4790                                         val |= 0xc010000;
4791                                 else
4792                                         val |= 0x4010000;
4793                                 tw32_f(MAC_SERDES_CFG, val);
4794                         }
4795
4796                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4797                 }
4798                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4799                         tg3_setup_flow_control(tp, 0, 0);
4800                         current_link_up = 1;
4801                 }
4802                 goto out;
4803         }
4804
4805         /* Want auto-negotiation.  */
4806         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4807
4808         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4809         if (flowctrl & ADVERTISE_1000XPAUSE)
4810                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4811         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4812                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4813
4814         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4815                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4816                     tp->serdes_counter &&
4817                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4818                                     MAC_STATUS_RCVD_CFG)) ==
4819                      MAC_STATUS_PCS_SYNCED)) {
4820                         tp->serdes_counter--;
4821                         current_link_up = 1;
4822                         goto out;
4823                 }
4824 restart_autoneg:
4825                 if (workaround)
4826                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4827                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4828                 udelay(5);
4829                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4830
4831                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4832                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4833         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4834                                  MAC_STATUS_SIGNAL_DET)) {
4835                 sg_dig_status = tr32(SG_DIG_STATUS);
4836                 mac_status = tr32(MAC_STATUS);
4837
4838                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4839                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4840                         u32 local_adv = 0, remote_adv = 0;
4841
4842                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4843                                 local_adv |= ADVERTISE_1000XPAUSE;
4844                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4845                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4846
4847                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4848                                 remote_adv |= LPA_1000XPAUSE;
4849                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4850                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4851
4852                         tp->link_config.rmt_adv =
4853                                            mii_adv_to_ethtool_adv_x(remote_adv);
4854
4855                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4856                         current_link_up = 1;
4857                         tp->serdes_counter = 0;
4858                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4859                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4860                         if (tp->serdes_counter)
4861                                 tp->serdes_counter--;
4862                         else {
4863                                 if (workaround) {
4864                                         u32 val = serdes_cfg;
4865
4866                                         if (port_a)
4867                                                 val |= 0xc010000;
4868                                         else
4869                                                 val |= 0x4010000;
4870
4871                                         tw32_f(MAC_SERDES_CFG, val);
4872                                 }
4873
4874                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4875                                 udelay(40);
4876
4877                                 /* Link parallel detection - link is up
4878                                  * only if we have PCS_SYNC and are not
4879                                  * receiving config code words. */
4880                                 mac_status = tr32(MAC_STATUS);
4881                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4882                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4883                                         tg3_setup_flow_control(tp, 0, 0);
4884                                         current_link_up = 1;
4885                                         tp->phy_flags |=
4886                                                 TG3_PHYFLG_PARALLEL_DETECT;
4887                                         tp->serdes_counter =
4888                                                 SERDES_PARALLEL_DET_TIMEOUT;
4889                                 } else
4890                                         goto restart_autoneg;
4891                         }
4892                 }
4893         } else {
4894                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4895                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4896         }
4897
4898 out:
4899         return current_link_up;
4900 }
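/*
 * Summary for readers: the SG_DIG block autonegotiates 1000BASE-X in
 * hardware.  This routine only programs the expected control word,
 * tracks completion/sync status, and falls back to parallel detection
 * (link up on PCS_SYNC with no config code words) when the partner
 * never completes autoneg.
 */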
4901
4902 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4903 {
4904         int current_link_up = 0;
4905
4906         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4907                 goto out;
4908
4909         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4910                 u32 txflags, rxflags;
4911                 int i;
4912
4913                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4914                         u32 local_adv = 0, remote_adv = 0;
4915
4916                         if (txflags & ANEG_CFG_PS1)
4917                                 local_adv |= ADVERTISE_1000XPAUSE;
4918                         if (txflags & ANEG_CFG_PS2)
4919                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4920
4921                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4922                                 remote_adv |= LPA_1000XPAUSE;
4923                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4924                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4925
4926                         tp->link_config.rmt_adv =
4927                                            mii_adv_to_ethtool_adv_x(remote_adv);
4928
4929                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4930
4931                         current_link_up = 1;
4932                 }
4933                 for (i = 0; i < 30; i++) {
4934                         udelay(20);
4935                         tw32_f(MAC_STATUS,
4936                                (MAC_STATUS_SYNC_CHANGED |
4937                                 MAC_STATUS_CFG_CHANGED));
4938                         udelay(40);
4939                         if ((tr32(MAC_STATUS) &
4940                              (MAC_STATUS_SYNC_CHANGED |
4941                               MAC_STATUS_CFG_CHANGED)) == 0)
4942                                 break;
4943                 }
4944
4945                 mac_status = tr32(MAC_STATUS);
4946                 if (current_link_up == 0 &&
4947                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4948                     !(mac_status & MAC_STATUS_RCVD_CFG))
4949                         current_link_up = 1;
4950         } else {
4951                 tg3_setup_flow_control(tp, 0, 0);
4952
4953                 /* Forcing 1000FD link up. */
4954                 current_link_up = 1;
4955
4956                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4957                 udelay(40);
4958
4959                 tw32_f(MAC_MODE, tp->mac_mode);
4960                 udelay(40);
4961         }
4962
4963 out:
4964         return current_link_up;
4965 }
4966
4967 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4968 {
4969         u32 orig_pause_cfg;
4970         u16 orig_active_speed;
4971         u8 orig_active_duplex;
4972         u32 mac_status;
4973         int current_link_up;
4974         int i;
4975
4976         orig_pause_cfg = tp->link_config.active_flowctrl;
4977         orig_active_speed = tp->link_config.active_speed;
4978         orig_active_duplex = tp->link_config.active_duplex;
4979
4980         if (!tg3_flag(tp, HW_AUTONEG) &&
4981             netif_carrier_ok(tp->dev) &&
4982             tg3_flag(tp, INIT_COMPLETE)) {
4983                 mac_status = tr32(MAC_STATUS);
4984                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4985                                MAC_STATUS_SIGNAL_DET |
4986                                MAC_STATUS_CFG_CHANGED |
4987                                MAC_STATUS_RCVD_CFG);
4988                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4989                                    MAC_STATUS_SIGNAL_DET)) {
4990                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4991                                             MAC_STATUS_CFG_CHANGED));
4992                         return 0;
4993                 }
4994         }
4995
4996         tw32_f(MAC_TX_AUTO_NEG, 0);
4997
4998         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4999         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5000         tw32_f(MAC_MODE, tp->mac_mode);
5001         udelay(40);
5002
5003         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5004                 tg3_init_bcm8002(tp);
5005
5006         /* Enable link change events even while polling the serdes.  */
5007         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5008         udelay(40);
5009
5010         current_link_up = 0;
5011         tp->link_config.rmt_adv = 0;
5012         mac_status = tr32(MAC_STATUS);
5013
5014         if (tg3_flag(tp, HW_AUTONEG))
5015                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5016         else
5017                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5018
5019         tp->napi[0].hw_status->status =
5020                 (SD_STATUS_UPDATED |
5021                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5022
5023         for (i = 0; i < 100; i++) {
5024                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5025                                     MAC_STATUS_CFG_CHANGED));
5026                 udelay(5);
5027                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5028                                          MAC_STATUS_CFG_CHANGED |
5029                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5030                         break;
5031         }
5032
5033         mac_status = tr32(MAC_STATUS);
5034         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5035                 current_link_up = 0;
5036                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5037                     tp->serdes_counter == 0) {
5038                         tw32_f(MAC_MODE, (tp->mac_mode |
5039                                           MAC_MODE_SEND_CONFIGS));
5040                         udelay(1);
5041                         tw32_f(MAC_MODE, tp->mac_mode);
5042                 }
5043         }
5044
5045         if (current_link_up == 1) {
5046                 tp->link_config.active_speed = SPEED_1000;
5047                 tp->link_config.active_duplex = DUPLEX_FULL;
5048                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5049                                     LED_CTRL_LNKLED_OVERRIDE |
5050                                     LED_CTRL_1000MBPS_ON));
5051         } else {
5052                 tp->link_config.active_speed = SPEED_UNKNOWN;
5053                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5054                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5055                                     LED_CTRL_LNKLED_OVERRIDE |
5056                                     LED_CTRL_TRAFFIC_OVERRIDE));
5057         }
5058
5059         if (current_link_up != netif_carrier_ok(tp->dev)) {
5060                 if (current_link_up)
5061                         netif_carrier_on(tp->dev);
5062                 else
5063                         netif_carrier_off(tp->dev);
5064                 tg3_link_report(tp);
5065         } else {
5066                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5067                 if (orig_pause_cfg != now_pause_cfg ||
5068                     orig_active_speed != tp->link_config.active_speed ||
5069                     orig_active_duplex != tp->link_config.active_duplex)
5070                         tg3_link_report(tp);
5071         }
5072
5073         return 0;
5074 }
5075
5076 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5077 {
5078         int current_link_up, err = 0;
5079         u32 bmsr, bmcr;
5080         u16 current_speed;
5081         u8 current_duplex;
5082         u32 local_adv, remote_adv;
5083
5084         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5085         tw32_f(MAC_MODE, tp->mac_mode);
5086         udelay(40);
5087
5088         tw32(MAC_EVENT, 0);
5089
5090         tw32_f(MAC_STATUS,
5091              (MAC_STATUS_SYNC_CHANGED |
5092               MAC_STATUS_CFG_CHANGED |
5093               MAC_STATUS_MI_COMPLETION |
5094               MAC_STATUS_LNKSTATE_CHANGED));
5095         udelay(40);
5096
5097         if (force_reset)
5098                 tg3_phy_reset(tp);
5099
5100         current_link_up = 0;
5101         current_speed = SPEED_UNKNOWN;
5102         current_duplex = DUPLEX_UNKNOWN;
5103         tp->link_config.rmt_adv = 0;
5104
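        /* BMSR latches link-down events; read it back-to-back so the
         * second read reflects the current link state.
         */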
5105         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5106         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5107         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5108                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5109                         bmsr |= BMSR_LSTATUS;
5110                 else
5111                         bmsr &= ~BMSR_LSTATUS;
5112         }
5113
5114         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5115
5116         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5117             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5118                 /* do nothing, just check for link up at the end */
5119         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5120                 u32 adv, newadv;
5121
5122                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5123                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5124                                  ADVERTISE_1000XPAUSE |
5125                                  ADVERTISE_1000XPSE_ASYM |
5126                                  ADVERTISE_SLCT);
5127
5128                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5129                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5130
5131                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5132                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5133                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5134                         tg3_writephy(tp, MII_BMCR, bmcr);
5135
5136                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5137                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5138                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5139
5140                         return err;
5141                 }
5142         } else {
5143                 u32 new_bmcr;
5144
5145                 bmcr &= ~BMCR_SPEED1000;
5146                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5147
5148                 if (tp->link_config.duplex == DUPLEX_FULL)
5149                         new_bmcr |= BMCR_FULLDPLX;
5150
5151                 if (new_bmcr != bmcr) {
5152                         /* BMCR_SPEED1000 is a reserved bit that needs
5153                          * to be set on write.
5154                          */
5155                         new_bmcr |= BMCR_SPEED1000;
5156
5157                         /* Force a linkdown */
5158                         if (netif_carrier_ok(tp->dev)) {
5159                                 u32 adv;
5160
5161                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5162                                 adv &= ~(ADVERTISE_1000XFULL |
5163                                          ADVERTISE_1000XHALF |
5164                                          ADVERTISE_SLCT);
5165                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5166                                 tg3_writephy(tp, MII_BMCR, bmcr |
5167                                                            BMCR_ANRESTART |
5168                                                            BMCR_ANENABLE);
5169                                 udelay(10);
5170                                 netif_carrier_off(tp->dev);
5171                         }
5172                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5173                         bmcr = new_bmcr;
5174                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5175                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5176                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5177                             ASIC_REV_5714) {
5178                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5179                                         bmsr |= BMSR_LSTATUS;
5180                                 else
5181                                         bmsr &= ~BMSR_LSTATUS;
5182                         }
5183                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5184                 }
5185         }
5186
5187         if (bmsr & BMSR_LSTATUS) {
5188                 current_speed = SPEED_1000;
5189                 current_link_up = 1;
5190                 if (bmcr & BMCR_FULLDPLX)
5191                         current_duplex = DUPLEX_FULL;
5192                 else
5193                         current_duplex = DUPLEX_HALF;
5194
5195                 local_adv = 0;
5196                 remote_adv = 0;
5197
5198                 if (bmcr & BMCR_ANENABLE) {
5199                         u32 common;
5200
5201                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5202                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5203                         common = local_adv & remote_adv;
5204                         if (common & (ADVERTISE_1000XHALF |
5205                                       ADVERTISE_1000XFULL)) {
5206                                 if (common & ADVERTISE_1000XFULL)
5207                                         current_duplex = DUPLEX_FULL;
5208                                 else
5209                                         current_duplex = DUPLEX_HALF;
5210
5211                                 tp->link_config.rmt_adv =
5212                                            mii_adv_to_ethtool_adv_x(remote_adv);
5213                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5214                                 /* Link is up via parallel detect */
5215                         } else {
5216                                 current_link_up = 0;
5217                         }
5218                 }
5219         }
5220
5221         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5222                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5223
5224         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5225         if (tp->link_config.active_duplex == DUPLEX_HALF)
5226                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5227
5228         tw32_f(MAC_MODE, tp->mac_mode);
5229         udelay(40);
5230
5231         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5232
5233         tp->link_config.active_speed = current_speed;
5234         tp->link_config.active_duplex = current_duplex;
5235
5236         if (current_link_up != netif_carrier_ok(tp->dev)) {
5237                 if (current_link_up)
5238                         netif_carrier_on(tp->dev);
5239                 else {
5240                         netif_carrier_off(tp->dev);
5241                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5242                 }
5243                 tg3_link_report(tp);
5244         }
5245         return err;
5246 }
5247
5248 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5249 {
5250         if (tp->serdes_counter) {
5251                 /* Give autoneg time to complete. */
5252                 tp->serdes_counter--;
5253                 return;
5254         }
5255
5256         if (!netif_carrier_ok(tp->dev) &&
5257             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5258                 u32 bmcr;
5259
5260                 tg3_readphy(tp, MII_BMCR, &bmcr);
5261                 if (bmcr & BMCR_ANENABLE) {
5262                         u32 phy1, phy2;
5263
5264                         /* Select shadow register 0x1f */
5265                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5266                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5267
5268                         /* Select expansion interrupt status register */
5269                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5270                                          MII_TG3_DSP_EXP1_INT_STAT);
5271                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5272                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5273
5274                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5275                                 /* We have signal detect and are not
5276                                  * receiving config code words; the link
5277                                  * is up by parallel detection.
5278                                  */
5279
5280                                 bmcr &= ~BMCR_ANENABLE;
5281                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5282                                 tg3_writephy(tp, MII_BMCR, bmcr);
5283                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5284                         }
5285                 }
5286         } else if (netif_carrier_ok(tp->dev) &&
5287                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5288                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5289                 u32 phy2;
5290
5291                 /* Select expansion interrupt status register */
5292                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5293                                  MII_TG3_DSP_EXP1_INT_STAT);
5294                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5295                 if (phy2 & 0x20) {
5296                         u32 bmcr;
5297
5298                         /* Config code words received, turn on autoneg. */
5299                         tg3_readphy(tp, MII_BMCR, &bmcr);
5300                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5301
5302                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5303
5304                 }
5305         }
5306 }
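/*
 * Summary for readers: "parallel detection" declares the link up when we
 * see signal detect but no autoneg config code words, i.e. the partner
 * has autoneg disabled.  The first branch above arms it by forcing
 * 1000/full with autoneg off; the second re-enables autoneg as soon as
 * config code words reappear.
 */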
5307
5308 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5309 {
5310         u32 val;
5311         int err;
5312
5313         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5314                 err = tg3_setup_fiber_phy(tp, force_reset);
5315         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5316                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5317         else
5318                 err = tg3_setup_copper_phy(tp, force_reset);
5319
5320         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5321                 u32 scale;
5322
5323                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5324                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5325                         scale = 65;
5326                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5327                         scale = 6;
5328                 else
5329                         scale = 12;
5330
5331                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5332                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5333                 tw32(GRC_MISC_CFG, val);
5334         }
5335
5336         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5337               (6 << TX_LENGTHS_IPG_SHIFT);
5338         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5339                 val |= tr32(MAC_TX_LENGTHS) &
5340                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5341                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5342
5343         if (tp->link_config.active_speed == SPEED_1000 &&
5344             tp->link_config.active_duplex == DUPLEX_HALF)
5345                 tw32(MAC_TX_LENGTHS, val |
5346                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5347         else
5348                 tw32(MAC_TX_LENGTHS, val |
5349                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
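        /* Note: the larger slot time for 1000/half is consistent with
         * gigabit half duplex's 4096-bit-time slot (carrier extension);
         * all other modes keep the standard 512-bit-time value.
         */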
5350
5351         if (!tg3_flag(tp, 5705_PLUS)) {
5352                 if (netif_carrier_ok(tp->dev)) {
5353                         tw32(HOSTCC_STAT_COAL_TICKS,
5354                              tp->coal.stats_block_coalesce_usecs);
5355                 } else {
5356                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5357                 }
5358         }
5359
5360         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5361                 val = tr32(PCIE_PWR_MGMT_THRESH);
5362                 if (!netif_carrier_ok(tp->dev))
5363                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5364                               tp->pwrmgmt_thresh;
5365                 else
5366                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5367                 tw32(PCIE_PWR_MGMT_THRESH, val);
5368         }
5369
5370         return err;
5371 }
5372
5373 static inline int tg3_irq_sync(struct tg3 *tp)
5374 {
5375         return tp->irq_sync;
5376 }
5377
5378 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5379 {
5380         int i;
5381
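        /* Bias dst by the register offset so the caller's buffer is
         * indexed by absolute register address, i.e. the loop stores
         * tr32(off + i) at buffer word (off + i) / 4.
         */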
5382         dst = (u32 *)((u8 *)dst + off);
5383         for (i = 0; i < len; i += sizeof(u32))
5384                 *dst++ = tr32(off + i);
5385 }
5386
5387 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5388 {
5389         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5390         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5391         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5392         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5393         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5394         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5395         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5396         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5397         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5398         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5399         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5400         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5401         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5402         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5403         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5404         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5405         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5406         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5407         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5408
5409         if (tg3_flag(tp, SUPPORT_MSIX))
5410                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5411
5412         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5413         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5414         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5415         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5416         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5417         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5418         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5419         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5420
5421         if (!tg3_flag(tp, 5705_PLUS)) {
5422                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5423                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5424                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5425         }
5426
5427         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5428         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5429         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5430         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5431         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5432
5433         if (tg3_flag(tp, NVRAM))
5434                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5435 }
5436
5437 static void tg3_dump_state(struct tg3 *tp)
5438 {
5439         int i;
5440         u32 *regs;
5441
5442         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5443         if (!regs) {
5444                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5445                 return;
5446         }
5447
5448         if (tg3_flag(tp, PCI_EXPRESS)) {
5449                 /* Read up to but not including private PCI registers */
5450                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5451                         regs[i / sizeof(u32)] = tr32(i);
5452         } else
5453                 tg3_dump_legacy_regs(tp, regs);
5454
5455         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5456                 if (!regs[i + 0] && !regs[i + 1] &&
5457                     !regs[i + 2] && !regs[i + 3])
5458                         continue;
5459
5460                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5461                            i * 4,
5462                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5463         }
5464
5465         kfree(regs);
5466
5467         for (i = 0; i < tp->irq_cnt; i++) {
5468                 struct tg3_napi *tnapi = &tp->napi[i];
5469
5470                 /* SW status block */
5471                 netdev_err(tp->dev,
5472                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5473                            i,
5474                            tnapi->hw_status->status,
5475                            tnapi->hw_status->status_tag,
5476                            tnapi->hw_status->rx_jumbo_consumer,
5477                            tnapi->hw_status->rx_consumer,
5478                            tnapi->hw_status->rx_mini_consumer,
5479                            tnapi->hw_status->idx[0].rx_producer,
5480                            tnapi->hw_status->idx[0].tx_consumer);
5481
5482                 netdev_err(tp->dev,
5483                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5484                            i,
5485                            tnapi->last_tag, tnapi->last_irq_tag,
5486                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5487                            tnapi->rx_rcb_ptr,
5488                            tnapi->prodring.rx_std_prod_idx,
5489                            tnapi->prodring.rx_std_cons_idx,
5490                            tnapi->prodring.rx_jmb_prod_idx,
5491                            tnapi->prodring.rx_jmb_cons_idx);
5492         }
5493 }
5494
5495 /* This is called whenever we suspect that the system chipset is re-
5496  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5497  * is bogus tx completions. We try to recover by setting the
5498  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5499  * in the workqueue.
5500  */
5501 static void tg3_tx_recover(struct tg3 *tp)
5502 {
5503         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5504                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5505
5506         netdev_warn(tp->dev,
5507                     "The system may be re-ordering memory-mapped I/O "
5508                     "cycles to the network device, attempting to recover. "
5509                     "Please report the problem to the driver maintainer "
5510                     "and include system chipset information.\n");
5511
5512         spin_lock(&tp->lock);
5513         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5514         spin_unlock(&tp->lock);
5515 }
5516
5517 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5518 {
5519         /* Tell compiler to fetch tx indices from memory. */
5520         barrier();
5521         return tnapi->tx_pending -
5522                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5523 }
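/*
 * Illustrative sketch (not part of the original driver): the in-flight
 * count that tg3_tx_avail() subtracts, spelled out.  Like the code
 * above, it assumes TG3_TX_RING_SIZE is a power of two, so the mask
 * also handles producer wrap-around.  E.g. with a 512-entry ring,
 * tx_prod == 5 and tx_cons == 510 give (5 - 510) & 511 == 7 in flight.
 */
static inline u32 tg3_tx_in_flight(struct tg3_napi *tnapi)
{
        /* Unsigned subtraction plus mask is wrap-safe. */
        return (tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1);
}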
5524
5525 /* Tigon3 never reports partial packet sends.  So we do not
5526  * need special logic to handle SKBs that have not had all
5527  * of their frags sent yet, like SunGEM does.
5528  */
5529 static void tg3_tx(struct tg3_napi *tnapi)
5530 {
5531         struct tg3 *tp = tnapi->tp;
5532         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5533         u32 sw_idx = tnapi->tx_cons;
5534         struct netdev_queue *txq;
5535         int index = tnapi - tp->napi;
5536         unsigned int pkts_compl = 0, bytes_compl = 0;
5537
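        /* With TSS, MSI-X vector 0 carries no tx ring, so the netdev tx
         * queue index is one less than the napi index.
         */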
5538         if (tg3_flag(tp, ENABLE_TSS))
5539                 index--;
5540
5541         txq = netdev_get_tx_queue(tp->dev, index);
5542
5543         while (sw_idx != hw_idx) {
5544                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5545                 struct sk_buff *skb = ri->skb;
5546                 int i, tx_bug = 0;
5547
5548                 if (unlikely(skb == NULL)) {
5549                         tg3_tx_recover(tp);
5550                         return;
5551                 }
5552
5553                 pci_unmap_single(tp->pdev,
5554                                  dma_unmap_addr(ri, mapping),
5555                                  skb_headlen(skb),
5556                                  PCI_DMA_TODEVICE);
5557
5558                 ri->skb = NULL;
5559
5560                 while (ri->fragmented) {
5561                         ri->fragmented = false;
5562                         sw_idx = NEXT_TX(sw_idx);
5563                         ri = &tnapi->tx_buffers[sw_idx];
5564                 }
5565
5566                 sw_idx = NEXT_TX(sw_idx);
5567
5568                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5569                         ri = &tnapi->tx_buffers[sw_idx];
5570                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5571                                 tx_bug = 1;
5572
5573                         pci_unmap_page(tp->pdev,
5574                                        dma_unmap_addr(ri, mapping),
5575                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5576                                        PCI_DMA_TODEVICE);
5577
5578                         while (ri->fragmented) {
5579                                 ri->fragmented = false;
5580                                 sw_idx = NEXT_TX(sw_idx);
5581                                 ri = &tnapi->tx_buffers[sw_idx];
5582                         }
5583
5584                         sw_idx = NEXT_TX(sw_idx);
5585                 }
5586
5587                 pkts_compl++;
5588                 bytes_compl += skb->len;
5589
5590                 dev_kfree_skb(skb);
5591
5592                 if (unlikely(tx_bug)) {
5593                         tg3_tx_recover(tp);
5594                         return;
5595                 }
5596         }
5597
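        /* BQL completion accounting; pairs with netdev_sent_queue() on
         * the transmit path.
         */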
5598         netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
5599
5600         tnapi->tx_cons = sw_idx;
5601
5602         /* Need to make the tx_cons update visible to tg3_start_xmit()
5603          * before checking for netif_queue_stopped().  Without the
5604          * memory barrier, there is a small possibility that tg3_start_xmit()
5605          * will miss it and cause the queue to be stopped forever.
5606          */
5607         smp_mb();
5608
5609         if (unlikely(netif_tx_queue_stopped(txq) &&
5610                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5611                 __netif_tx_lock(txq, smp_processor_id());
5612                 if (netif_tx_queue_stopped(txq) &&
5613                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5614                         netif_tx_wake_queue(txq);
5615                 __netif_tx_unlock(txq);
5616         }
5617 }
5618
5619 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5620 {
5621         if (!ri->data)
5622                 return;
5623
5624         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5625                          map_sz, PCI_DMA_FROMDEVICE);
5626         kfree(ri->data);
5627         ri->data = NULL;
5628 }
5629
5630 /* Returns size of the data buffer allocated or < 0 on error.
5631  *
5632  * We only need to fill in the address because the other members
5633  * of the RX descriptor are invariant, see tg3_init_rings.
5634  *
5635  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5636  * posting buffers we only dirty the first cache line of the RX
5637  * descriptor (containing the address).  Whereas for the RX status
5638  * buffers the cpu only reads the last cacheline of the RX descriptor
5639  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5640  */
5641 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5642                             u32 opaque_key, u32 dest_idx_unmasked)
5643 {
5644         struct tg3_rx_buffer_desc *desc;
5645         struct ring_info *map;
5646         u8 *data;
5647         dma_addr_t mapping;
5648         int skb_size, data_size, dest_idx;
5649
5650         switch (opaque_key) {
5651         case RXD_OPAQUE_RING_STD:
5652                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5653                 desc = &tpr->rx_std[dest_idx];
5654                 map = &tpr->rx_std_buffers[dest_idx];
5655                 data_size = tp->rx_pkt_map_sz;
5656                 break;
5657
5658         case RXD_OPAQUE_RING_JUMBO:
5659                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5660                 desc = &tpr->rx_jmb[dest_idx].std;
5661                 map = &tpr->rx_jmb_buffers[dest_idx];
5662                 data_size = TG3_RX_JMB_MAP_SZ;
5663                 break;
5664
5665         default:
5666                 return -EINVAL;
5667         }
5668
5669         /* Do not overwrite any of the map or rp information
5670          * until we are sure we can commit to a new buffer.
5671          *
5672          * Callers depend upon this behavior and assume that
5673          * we leave everything unchanged if we fail.
5674          */
5675         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5676                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5677         data = kmalloc(skb_size, GFP_ATOMIC);
5678         if (!data)
5679                 return -ENOMEM;
5680
5681         mapping = pci_map_single(tp->pdev,
5682                                  data + TG3_RX_OFFSET(tp),
5683                                  data_size,
5684                                  PCI_DMA_FROMDEVICE);
5685         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5686                 kfree(data);
5687                 return -EIO;
5688         }
5689
5690         map->data = data;
5691         dma_unmap_addr_set(map, mapping, mapping);
5692
5693         desc->addr_hi = ((u64)mapping >> 32);
5694         desc->addr_lo = ((u64)mapping & 0xffffffff);
5695
5696         return data_size;
5697 }
5698
5699 /* We only need to move the address over because the other
5700  * members of the RX descriptor are invariant.  See notes above
5701  * tg3_alloc_rx_data for full details.
5702  */
5703 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5704                            struct tg3_rx_prodring_set *dpr,
5705                            u32 opaque_key, int src_idx,
5706                            u32 dest_idx_unmasked)
5707 {
5708         struct tg3 *tp = tnapi->tp;
5709         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5710         struct ring_info *src_map, *dest_map;
5711         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5712         int dest_idx;
5713
5714         switch (opaque_key) {
5715         case RXD_OPAQUE_RING_STD:
5716                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5717                 dest_desc = &dpr->rx_std[dest_idx];
5718                 dest_map = &dpr->rx_std_buffers[dest_idx];
5719                 src_desc = &spr->rx_std[src_idx];
5720                 src_map = &spr->rx_std_buffers[src_idx];
5721                 break;
5722
5723         case RXD_OPAQUE_RING_JUMBO:
5724                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5725                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5726                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5727                 src_desc = &spr->rx_jmb[src_idx].std;
5728                 src_map = &spr->rx_jmb_buffers[src_idx];
5729                 break;
5730
5731         default:
5732                 return;
5733         }
5734
5735         dest_map->data = src_map->data;
5736         dma_unmap_addr_set(dest_map, mapping,
5737                            dma_unmap_addr(src_map, mapping));
5738         dest_desc->addr_hi = src_desc->addr_hi;
5739         dest_desc->addr_lo = src_desc->addr_lo;
5740
5741         /* Ensure that the update to the skb happens after the physical
5742          * addresses have been transferred to the new BD location.
5743          */
5744         smp_wmb();
5745
5746         src_map->data = NULL;
5747 }
5748
5749 /* The RX ring scheme is composed of multiple rings which post fresh
5750  * buffers to the chip, and one special ring the chip uses to report
5751  * status back to the host.
5752  *
5753  * The special ring reports the status of received packets to the
5754  * host.  The chip does not write into the original descriptor the
5755  * RX buffer was obtained from.  The chip simply takes the original
5756  * descriptor as provided by the host, updates the status and length
5757  * field, then writes this into the next status ring entry.
5758  *
5759  * Each ring the host uses to post buffers to the chip is described
5760  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5761  * it is first placed into the on-chip RAM.  When the packet's length
5762  * is known, the chip walks down the TG3_BDINFO entries to select the
5763  * ring.  Each TG3_BDINFO specifies a MAXLEN field, and the first
5764  * TG3_BDINFO whose MAXLEN covers the new packet's length is chosen.
5765  *
5766  * The "separate ring for rx status" scheme may sound odd, but it makes
5767  * sense from a cache coherency perspective.  If only the host writes
5768  * to the buffer post rings, and only the chip writes to the rx status
5769  * rings, then cache lines never move beyond shared-modified state.
5770  * If both the host and chip were to write into the same ring, cache line
5771  * eviction could occur since both entities want it in an exclusive state.
5772  */
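/* A rough picture of the scheme described above (illustrative only):
 *
 *      host writes                          chip writes
 *      -----------                          -----------
 *      std producer ring   --- buffers --->  on-chip RAM
 *      jumbo producer ring --- buffers --->      |
 *                                                v
 *      rx return (status) ring  <--- completed descriptors
 *
 * tg3_rx() below consumes the return ring at tnapi->rx_rcb_ptr and
 * reposts fresh buffers to whichever producer ring each packet was
 * drawn from.
 */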
5773 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5774 {
5775         struct tg3 *tp = tnapi->tp;
5776         u32 work_mask, rx_std_posted = 0;
5777         u32 std_prod_idx, jmb_prod_idx;
5778         u32 sw_idx = tnapi->rx_rcb_ptr;
5779         u16 hw_idx;
5780         int received;
5781         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5782
5783         hw_idx = *(tnapi->rx_rcb_prod_idx);
5784         /*
5785          * We need to order the read of hw_idx and the read of
5786          * the opaque cookie.
5787          */
5788         rmb();
5789         work_mask = 0;
5790         received = 0;
5791         std_prod_idx = tpr->rx_std_prod_idx;
5792         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5793         while (sw_idx != hw_idx && budget > 0) {
5794                 struct ring_info *ri;
5795                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5796                 unsigned int len;
5797                 struct sk_buff *skb;
5798                 dma_addr_t dma_addr;
5799                 u32 opaque_key, desc_idx, *post_ptr;
5800                 u8 *data;
5801
5802                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5803                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5804                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5805                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5806                         dma_addr = dma_unmap_addr(ri, mapping);
5807                         data = ri->data;
5808                         post_ptr = &std_prod_idx;
5809                         rx_std_posted++;
5810                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5811                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5812                         dma_addr = dma_unmap_addr(ri, mapping);
5813                         data = ri->data;
5814                         post_ptr = &jmb_prod_idx;
5815                 } else
5816                         goto next_pkt_nopost;
5817
5818                 work_mask |= opaque_key;
5819
5820                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5821                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5822                 drop_it:
5823                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5824                                        desc_idx, *post_ptr);
5825                 drop_it_no_recycle:
5826                         /* The card keeps track of the other statistics. */
5827                         tp->rx_dropped++;
5828                         goto next_pkt;
5829                 }
5830
5831                 prefetch(data + TG3_RX_OFFSET(tp));
5832                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5833                       ETH_FCS_LEN;
5834
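                /* Two strategies, chosen by the branch below: packets
                 * larger than TG3_RX_COPY_THRESH() hand the existing
                 * buffer straight to the stack via build_skb() and
                 * post a fresh replacement, while smaller packets are
                 * copied into a new skb so the large DMA buffer can be
                 * recycled in place via tg3_recycle_rx().
                 */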
5835                 if (len > TG3_RX_COPY_THRESH(tp)) {
5836                         int skb_size;
5837
5838                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5839                                                     *post_ptr);
5840                         if (skb_size < 0)
5841                                 goto drop_it;
5842
5843                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5844                                          PCI_DMA_FROMDEVICE);
5845
5846                         skb = build_skb(data);
5847                         if (!skb) {
5848                                 kfree(data);
5849                                 goto drop_it_no_recycle;
5850                         }
5851                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5852                         /* Ensure that the update to the data happens
5853                          * after the usage of the old DMA mapping.
5854                          */
5855                         smp_wmb();
5856
5857                         ri->data = NULL;
5858
5859                 } else {
5860                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5861                                        desc_idx, *post_ptr);
5862
5863                         skb = netdev_alloc_skb(tp->dev,
5864                                                len + TG3_RAW_IP_ALIGN);
5865                         if (skb == NULL)
5866                                 goto drop_it_no_recycle;
5867
5868                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
5869                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5870                         memcpy(skb->data,
5871                                data + TG3_RX_OFFSET(tp),
5872                                len);
5873                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5874                 }
5875
5876                 skb_put(skb, len);
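                /* A full ones-complement sum over a valid TCP/UDP
                 * segment yields 0xffff, which is why the check below
                 * treats a hardware-computed RXD_TCPCSUM of 0xffff as
                 * "checksum verified" and sets CHECKSUM_UNNECESSARY.
                 */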
5877                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5878                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5879                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5880                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5881                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5882                 else
5883                         skb_checksum_none_assert(skb);
5884
5885                 skb->protocol = eth_type_trans(skb, tp->dev);
5886
5887                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5888                     skb->protocol != htons(ETH_P_8021Q)) {
5889                         dev_kfree_skb(skb);
5890                         goto drop_it_no_recycle;
5891                 }
5892
5893                 if (desc->type_flags & RXD_FLAG_VLAN &&
5894                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5895                         __vlan_hwaccel_put_tag(skb,
5896                                                desc->err_vlan & RXD_VLAN_MASK);
5897
5898                 napi_gro_receive(&tnapi->napi, skb);
5899
5900                 received++;
5901                 budget--;
5902
5903 next_pkt:
5904                 (*post_ptr)++;
5905
5906                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5907                         tpr->rx_std_prod_idx = std_prod_idx &
5908                                                tp->rx_std_ring_mask;
5909                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5910                                      tpr->rx_std_prod_idx);
5911                         work_mask &= ~RXD_OPAQUE_RING_STD;
5912                         rx_std_posted = 0;
5913                 }
5914 next_pkt_nopost:
5915                 sw_idx++;
5916                 sw_idx &= tp->rx_ret_ring_mask;
5917
5918                 /* Refresh hw_idx to see if there is new work */
5919                 if (sw_idx == hw_idx) {
5920                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5921                         rmb();
5922                 }
5923         }
5924
5925         /* ACK the status ring. */
5926         tnapi->rx_rcb_ptr = sw_idx;
5927         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5928
5929         /* Refill RX ring(s). */
5930         if (!tg3_flag(tp, ENABLE_RSS)) {
5931                 if (work_mask & RXD_OPAQUE_RING_STD) {
5932                         tpr->rx_std_prod_idx = std_prod_idx &
5933                                                tp->rx_std_ring_mask;
5934                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5935                                      tpr->rx_std_prod_idx);
5936                 }
5937                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5938                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5939                                                tp->rx_jmb_ring_mask;
5940                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5941                                      tpr->rx_jmb_prod_idx);
5942                 }
5943                 mmiowb();
5944         } else if (work_mask) {
5945                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5946                  * updated before the producer indices can be updated.
5947                  */
5948                 smp_wmb();
5949
5950                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5951                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5952
5953                 if (tnapi != &tp->napi[1])
5954                         napi_schedule(&tp->napi[1].napi);
5955         }
5956
5957         return received;
5958 }
5959
5960 static void tg3_poll_link(struct tg3 *tp)
5961 {
5962         /* handle link change and other phy events */
5963         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5964                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5965
5966                 if (sblk->status & SD_STATUS_LINK_CHG) {
5967                         sblk->status = SD_STATUS_UPDATED |
5968                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5969                         spin_lock(&tp->lock);
5970                         if (tg3_flag(tp, USE_PHYLIB)) {
5971                                 tw32_f(MAC_STATUS,
5972                                      (MAC_STATUS_SYNC_CHANGED |
5973                                       MAC_STATUS_CFG_CHANGED |
5974                                       MAC_STATUS_MI_COMPLETION |
5975                                       MAC_STATUS_LNKSTATE_CHANGED));
5976                                 udelay(40);
5977                         } else
5978                                 tg3_setup_phy(tp, 0);
5979                         spin_unlock(&tp->lock);
5980                 }
5981         }
5982 }
5983
5984 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5985                                 struct tg3_rx_prodring_set *dpr,
5986                                 struct tg3_rx_prodring_set *spr)
5987 {
5988         u32 si, di, cpycnt, src_prod_idx;
5989         int i, err = 0;
5990
5991         while (1) {
5992                 src_prod_idx = spr->rx_std_prod_idx;
5993
5994                 /* Make sure updates to the rx_std_buffers[] entries and the
5995                  * standard producer index are seen in the correct order.
5996                  */
5997                 smp_rmb();
5998
5999                 if (spr->rx_std_cons_idx == src_prod_idx)
6000                         break;
6001
6002                 if (spr->rx_std_cons_idx < src_prod_idx)
6003                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6004                 else
6005                         cpycnt = tp->rx_std_ring_mask + 1 -
6006                                  spr->rx_std_cons_idx;
6007
6008                 cpycnt = min(cpycnt,
6009                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6010
6011                 si = spr->rx_std_cons_idx;
6012                 di = dpr->rx_std_prod_idx;
6013
6014                 for (i = di; i < di + cpycnt; i++) {
6015                         if (dpr->rx_std_buffers[i].data) {
6016                                 cpycnt = i - di;
6017                                 err = -ENOSPC;
6018                                 break;
6019                         }
6020                 }
6021
6022                 if (!cpycnt)
6023                         break;
6024
6025                 /* Ensure that updates to the rx_std_buffers ring and the
6026                  * shadowed hardware producer ring from tg3_recycle_rx() are
6027                  * ordered correctly WRT the skb check above.
6028                  */
6029                 smp_rmb();
6030
6031                 memcpy(&dpr->rx_std_buffers[di],
6032                        &spr->rx_std_buffers[si],
6033                        cpycnt * sizeof(struct ring_info));
6034
6035                 for (i = 0; i < cpycnt; i++, di++, si++) {
6036                         struct tg3_rx_buffer_desc *sbd, *dbd;
6037                         sbd = &spr->rx_std[si];
6038                         dbd = &dpr->rx_std[di];
6039                         dbd->addr_hi = sbd->addr_hi;
6040                         dbd->addr_lo = sbd->addr_lo;
6041                 }
6042
6043                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6044                                        tp->rx_std_ring_mask;
6045                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6046                                        tp->rx_std_ring_mask;
6047         }
6048
6049         while (1) {
6050                 src_prod_idx = spr->rx_jmb_prod_idx;
6051
6052                 /* Make sure updates to the rx_jmb_buffers[] entries and
6053                  * the jumbo producer index are seen in the correct order.
6054                  */
6055                 smp_rmb();
6056
6057                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6058                         break;
6059
6060                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6061                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6062                 else
6063                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6064                                  spr->rx_jmb_cons_idx;
6065
6066                 cpycnt = min(cpycnt,
6067                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6068
6069                 si = spr->rx_jmb_cons_idx;
6070                 di = dpr->rx_jmb_prod_idx;
6071
6072                 for (i = di; i < di + cpycnt; i++) {
6073                         if (dpr->rx_jmb_buffers[i].data) {
6074                                 cpycnt = i - di;
6075                                 err = -ENOSPC;
6076                                 break;
6077                         }
6078                 }
6079
6080                 if (!cpycnt)
6081                         break;
6082
6083                 /* Ensure that updates to the rx_jmb_buffers ring and the
6084                  * shadowed hardware producer ring from tg3_recycle_rx() are
6085                  * ordered correctly WRT the skb check above.
6086                  */
6087                 smp_rmb();
6088
6089                 memcpy(&dpr->rx_jmb_buffers[di],
6090                        &spr->rx_jmb_buffers[si],
6091                        cpycnt * sizeof(struct ring_info));
6092
6093                 for (i = 0; i < cpycnt; i++, di++, si++) {
6094                         struct tg3_rx_buffer_desc *sbd, *dbd;
6095                         sbd = &spr->rx_jmb[si].std;
6096                         dbd = &dpr->rx_jmb[di].std;
6097                         dbd->addr_hi = sbd->addr_hi;
6098                         dbd->addr_lo = sbd->addr_lo;
6099                 }
6100
6101                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6102                                        tp->rx_jmb_ring_mask;
6103                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6104                                        tp->rx_jmb_ring_mask;
6105         }
6106
6107         return err;
6108 }
6109
6110 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6111 {
6112         struct tg3 *tp = tnapi->tp;
6113
6114         /* run TX completion thread */
6115         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6116                 tg3_tx(tnapi);
6117                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6118                         return work_done;
6119         }
6120
6121         /* run RX thread, within the bounds set by NAPI.
6122          * All RX "locking" is done by ensuring outside
6123          * code synchronizes with tg3->napi.poll()
6124          */
6125         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6126                 work_done += tg3_rx(tnapi, budget - work_done);
6127
6128         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6129                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6130                 int i, err = 0;
6131                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6132                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6133
6134                 for (i = 1; i < tp->irq_cnt; i++)
6135                         err |= tg3_rx_prodring_xfer(tp, dpr,
6136                                                     &tp->napi[i].prodring);
6137
6138                 wmb();
6139
6140                 if (std_prod_idx != dpr->rx_std_prod_idx)
6141                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6142                                      dpr->rx_std_prod_idx);
6143
6144                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6145                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6146                                      dpr->rx_jmb_prod_idx);
6147
6148                 mmiowb();
6149
6150                 if (err)
6151                         tw32_f(HOSTCC_MODE, tp->coal_now);
6152         }
6153
6154         return work_done;
6155 }
6156
6157 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6158 {
6159         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6160                 schedule_work(&tp->reset_task);
6161 }
6162
6163 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6164 {
6165         cancel_work_sync(&tp->reset_task);
6166         tg3_flag_clear(tp, RESET_TASK_PENDING);
6167         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6168 }
6169
6170 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6171 {
6172         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6173         struct tg3 *tp = tnapi->tp;
6174         int work_done = 0;
6175         struct tg3_hw_status *sblk = tnapi->hw_status;
6176
6177         while (1) {
6178                 work_done = tg3_poll_work(tnapi, work_done, budget);
6179
6180                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6181                         goto tx_recovery;
6182
6183                 if (unlikely(work_done >= budget))
6184                         break;
6185
6186                 /* tnapi->last_tag is used when re-enabling interrupts
6187                  * below to tell the hw how much work has been processed,
6188                  * so we must read it before checking for more work.
6189                  */
6190                 tnapi->last_tag = sblk->status_tag;
6191                 tnapi->last_irq_tag = tnapi->last_tag;
6192                 rmb();
6193
6194                 /* check for RX/TX work to do */
6195                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6196                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6197                         napi_complete(napi);
6198                         /* Reenable interrupts. */
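                        /* Writing last_tag << 24 to the interrupt
                         * mailbox both unmasks the vector and tells
                         * the chip which status tag was consumed; the
                         * chip re-raises the interrupt if it has since
                         * posted a newer tag.
                         */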
6199                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6200                         mmiowb();
6201                         break;
6202                 }
6203         }
6204
6205         return work_done;
6206
6207 tx_recovery:
6208         /* work_done is guaranteed to be less than budget. */
6209         napi_complete(napi);
6210         tg3_reset_task_schedule(tp);
6211         return work_done;
6212 }
6213
6214 static void tg3_process_error(struct tg3 *tp)
6215 {
6216         u32 val;
6217         bool real_error = false;
6218
6219         if (tg3_flag(tp, ERROR_PROCESSED))
6220                 return;
6221
6222         /* Check Flow Attention register */
6223         val = tr32(HOSTCC_FLOW_ATTN);
6224         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6225                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6226                 real_error = true;
6227         }
6228
6229         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6230                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6231                 real_error = true;
6232         }
6233
6234         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6235                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6236                 real_error = true;
6237         }
6238
6239         if (!real_error)
6240                 return;
6241
6242         tg3_dump_state(tp);
6243
6244         tg3_flag_set(tp, ERROR_PROCESSED);
6245         tg3_reset_task_schedule(tp);
6246 }
6247
6248 static int tg3_poll(struct napi_struct *napi, int budget)
6249 {
6250         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6251         struct tg3 *tp = tnapi->tp;
6252         int work_done = 0;
6253         struct tg3_hw_status *sblk = tnapi->hw_status;
6254
6255         while (1) {
6256                 if (sblk->status & SD_STATUS_ERROR)
6257                         tg3_process_error(tp);
6258
6259                 tg3_poll_link(tp);
6260
6261                 work_done = tg3_poll_work(tnapi, work_done, budget);
6262
6263                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6264                         goto tx_recovery;
6265
6266                 if (unlikely(work_done >= budget))
6267                         break;
6268
6269                 if (tg3_flag(tp, TAGGED_STATUS)) {
6270                         /* tnapi->last_tag is used in tg3_int_reenable() below
6271                          * to tell the hw how much work has been processed,
6272                          * so we must read it before checking for more work.
6273                          */
6274                         tnapi->last_tag = sblk->status_tag;
6275                         tnapi->last_irq_tag = tnapi->last_tag;
6276                         rmb();
6277                 } else
6278                         sblk->status &= ~SD_STATUS_UPDATED;
6279
6280                 if (likely(!tg3_has_work(tnapi))) {
6281                         napi_complete(napi);
6282                         tg3_int_reenable(tnapi);
6283                         break;
6284                 }
6285         }
6286
6287         return work_done;
6288
6289 tx_recovery:
6290         /* work_done is guaranteed to be less than budget. */
6291         napi_complete(napi);
6292         tg3_reset_task_schedule(tp);
6293         return work_done;
6294 }
6295
6296 static void tg3_napi_disable(struct tg3 *tp)
6297 {
6298         int i;
6299
6300         for (i = tp->irq_cnt - 1; i >= 0; i--)
6301                 napi_disable(&tp->napi[i].napi);
6302 }
6303
6304 static void tg3_napi_enable(struct tg3 *tp)
6305 {
6306         int i;
6307
6308         for (i = 0; i < tp->irq_cnt; i++)
6309                 napi_enable(&tp->napi[i].napi);
6310 }
6311
6312 static void tg3_napi_init(struct tg3 *tp)
6313 {
6314         int i;
6315
6316         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6317         for (i = 1; i < tp->irq_cnt; i++)
6318                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6319 }
6320
6321 static void tg3_napi_fini(struct tg3 *tp)
6322 {
6323         int i;
6324
6325         for (i = 0; i < tp->irq_cnt; i++)
6326                 netif_napi_del(&tp->napi[i].napi);
6327 }
6328
6329 static inline void tg3_netif_stop(struct tg3 *tp)
6330 {
6331         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6332         tg3_napi_disable(tp);
6333         netif_tx_disable(tp->dev);
6334 }
6335
6336 static inline void tg3_netif_start(struct tg3 *tp)
6337 {
6338         /* NOTE: unconditional netif_tx_wake_all_queues is only
6339          * appropriate so long as all callers are assured to
6340          * have free tx slots (such as after tg3_init_hw)
6341          */
6342         netif_tx_wake_all_queues(tp->dev);
6343
6344         tg3_napi_enable(tp);
6345         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6346         tg3_enable_ints(tp);
6347 }
6348
6349 static void tg3_irq_quiesce(struct tg3 *tp)
6350 {
6351         int i;
6352
6353         BUG_ON(tp->irq_sync);
6354
6355         tp->irq_sync = 1;
6356         smp_mb();
6357
6358         for (i = 0; i < tp->irq_cnt; i++)
6359                 synchronize_irq(tp->napi[i].irq_vec);
6360 }
6361
6362 /* Fully shut down all tg3 driver activity elsewhere in the system.
6363  * If irq_sync is non-zero, then the IRQ handler must be synchronized
6364  * with as well.  Most of the time this is not necessary, except when
6365  * shutting down the device.
6366  */
6367 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6368 {
6369         spin_lock_bh(&tp->lock);
6370         if (irq_sync)
6371                 tg3_irq_quiesce(tp);
6372 }
6373
6374 static inline void tg3_full_unlock(struct tg3 *tp)
6375 {
6376         spin_unlock_bh(&tp->lock);
6377 }
6378
6379 /* One-shot MSI handler - Chip automatically disables interrupt
6380  * after sending MSI so driver doesn't have to do it.
6381  */
6382 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6383 {
6384         struct tg3_napi *tnapi = dev_id;
6385         struct tg3 *tp = tnapi->tp;
6386
6387         prefetch(tnapi->hw_status);
6388         if (tnapi->rx_rcb)
6389                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6390
6391         if (likely(!tg3_irq_sync(tp)))
6392                 napi_schedule(&tnapi->napi);
6393
6394         return IRQ_HANDLED;
6395 }
6396
6397 /* MSI ISR - No need to check for interrupt sharing and no need to
6398  * flush status block and interrupt mailbox. PCI ordering rules
6399  * guarantee that MSI will arrive after the status block.
6400  */
6401 static irqreturn_t tg3_msi(int irq, void *dev_id)
6402 {
6403         struct tg3_napi *tnapi = dev_id;
6404         struct tg3 *tp = tnapi->tp;
6405
6406         prefetch(tnapi->hw_status);
6407         if (tnapi->rx_rcb)
6408                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6409         /*
6410          * Writing any value to intr-mbox-0 clears PCI INTA# and
6411          * chip-internal interrupt pending events.
6412          * Writing non-zero to intr-mbox-0 additionally tells the
6413          * NIC to stop sending us irqs, engaging "in-intr-handler"
6414          * event coalescing.
6415          */
6416         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6417         if (likely(!tg3_irq_sync(tp)))
6418                 napi_schedule(&tnapi->napi);
6419
6420         return IRQ_RETVAL(1);
6421 }
6422
6423 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6424 {
6425         struct tg3_napi *tnapi = dev_id;
6426         struct tg3 *tp = tnapi->tp;
6427         struct tg3_hw_status *sblk = tnapi->hw_status;
6428         unsigned int handled = 1;
6429
6430         /* In INTx mode, it is possible for the interrupt to arrive at
6431          * the CPU before the status block write posted prior to the
6432          * interrupt becomes visible.  Reading the PCI State register
6433          * confirms whether the interrupt is ours and flushes the status block.
6434          */
6435         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6436                 if (tg3_flag(tp, CHIP_RESETTING) ||
6437                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6438                         handled = 0;
6439                         goto out;
6440                 }
6441         }
6442
6443         /*
6444          * Writing any value to intr-mbox-0 clears PCI INTA# and
6445          * chip-internal interrupt pending events.
6446          * Writing non-zero to intr-mbox-0 additionally tells the
6447          * NIC to stop sending us irqs, engaging "in-intr-handler"
6448          * event coalescing.
6449          *
6450          * Flush the mailbox to de-assert the IRQ immediately to prevent
6451          * spurious interrupts.  The flush impacts performance but
6452          * excessive spurious interrupts can be worse in some cases.
6453          */
6454         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6455         if (tg3_irq_sync(tp))
6456                 goto out;
6457         sblk->status &= ~SD_STATUS_UPDATED;
6458         if (likely(tg3_has_work(tnapi))) {
6459                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6460                 napi_schedule(&tnapi->napi);
6461         } else {
6462                 /* No work, shared interrupt perhaps?  re-enable
6463                  * interrupts, and flush that PCI write
6464                  */
6465                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6466                                0x00000000);
6467         }
6468 out:
6469         return IRQ_RETVAL(handled);
6470 }
6471
6472 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6473 {
6474         struct tg3_napi *tnapi = dev_id;
6475         struct tg3 *tp = tnapi->tp;
6476         struct tg3_hw_status *sblk = tnapi->hw_status;
6477         unsigned int handled = 1;
6478
6479         /* In INTx mode, it is possible for the interrupt to arrive at
6480          * the CPU before the status block write posted prior to the
6481          * interrupt becomes visible.  Reading the PCI State register
6482          * confirms whether the interrupt is ours and flushes the status block.
6483          */
6484         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6485                 if (tg3_flag(tp, CHIP_RESETTING) ||
6486                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6487                         handled = 0;
6488                         goto out;
6489                 }
6490         }
6491
6492         /*
6493          * Writing any value to intr-mbox-0 clears PCI INTA# and
6494          * chip-internal interrupt pending events.
6495          * Writing non-zero to intr-mbox-0 additionally tells the
6496          * NIC to stop sending us irqs, engaging "in-intr-handler"
6497          * event coalescing.
6498          *
6499          * Flush the mailbox to de-assert the IRQ immediately to prevent
6500          * spurious interrupts.  The flush impacts performance but
6501          * excessive spurious interrupts can be worse in some cases.
6502          */
6503         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6504
6505         /*
6506          * In a shared interrupt configuration, sometimes other devices'
6507          * interrupts will scream.  We record the current status tag here
6508          * so that the above check can report that the screaming interrupts
6509          * are unhandled.  Eventually they will be silenced.
6510          */
6511         tnapi->last_irq_tag = sblk->status_tag;
6512
6513         if (tg3_irq_sync(tp))
6514                 goto out;
6515
6516         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6517
6518         napi_schedule(&tnapi->napi);
6519
6520 out:
6521         return IRQ_RETVAL(handled);
6522 }
6523
6524 /* ISR for interrupt test */
6525 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6526 {
6527         struct tg3_napi *tnapi = dev_id;
6528         struct tg3 *tp = tnapi->tp;
6529         struct tg3_hw_status *sblk = tnapi->hw_status;
6530
6531         if ((sblk->status & SD_STATUS_UPDATED) ||
6532             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6533                 tg3_disable_ints(tp);
6534                 return IRQ_RETVAL(1);
6535         }
6536         return IRQ_RETVAL(0);
6537 }
6538
6539 #ifdef CONFIG_NET_POLL_CONTROLLER
6540 static void tg3_poll_controller(struct net_device *dev)
6541 {
6542         int i;
6543         struct tg3 *tp = netdev_priv(dev);
6544
6545         for (i = 0; i < tp->irq_cnt; i++)
6546                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6547 }
6548 #endif
6549
6550 static void tg3_tx_timeout(struct net_device *dev)
6551 {
6552         struct tg3 *tp = netdev_priv(dev);
6553
6554         if (netif_msg_tx_err(tp)) {
6555                 netdev_err(dev, "transmit timed out, resetting\n");
6556                 tg3_dump_state(tp);
6557         }
6558
6559         tg3_reset_task_schedule(tp);
6560 }
6561
6562 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6563 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6564 {
6565         u32 base = (u32) mapping & 0xffffffff;
6566
6567         return (base > 0xffffdcc0) && (base + len + 8 < base);
6568 }
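/* Worked example (illustrative): for mapping = 0xfffff000 and
 * len = 0x2000, base = 0xfffff000 exceeds 0xffffdcc0 and the 32-bit
 * sum base + len + 8 wraps to 0x1008 < base, so the buffer straddles
 * a 4GB boundary and must be bounced.  The +8 adds a small guard
 * band, and the 0xffffdcc0 pre-check (roughly 9KB below the
 * boundary) cheaply rules out buffers that end too far from the
 * boundary for a jumbo-sized fragment to cross it.
 */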
6569
6570 /* Test for DMA addresses > 40-bit */
6571 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6572                                           int len)
6573 {
6574 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6575         if (tg3_flag(tp, 40BIT_DMA_BUG))
6576                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6577         return 0;
6578 #else
6579         return 0;
6580 #endif
6581 }
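/* Worked example (illustrative): with 40BIT_DMA_BUG set, a mapping of
 * 0xfffffff000 with len 0x2000 ends at 0x10000001000, which is above
 * DMA_BIT_MASK(40) (0xffffffffff), so the buffer would land beyond
 * what the hardware can address and must be bounced.  Per the #if
 * above, the test is compiled in only on 64-bit kernels with
 * CONFIG_HIGHMEM.
 */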
6582
6583 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6584                                  dma_addr_t mapping, u32 len, u32 flags,
6585                                  u32 mss, u32 vlan)
6586 {
6587         txbd->addr_hi = ((u64) mapping >> 32);
6588         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6589         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6590         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6591 }
6592
6593 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6594                             dma_addr_t map, u32 len, u32 flags,
6595                             u32 mss, u32 vlan)
6596 {
6597         struct tg3 *tp = tnapi->tp;
6598         bool hwbug = false;
6599
6600         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6601                 hwbug = true;
6602
6603         if (tg3_4g_overflow_test(map, len))
6604                 hwbug = true;
6605
6606         if (tg3_40bit_overflow_test(tp, map, len))
6607                 hwbug = true;
6608
6609         if (tp->dma_limit) {
6610                 u32 prvidx = *entry;
6611                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6612                 while (len > tp->dma_limit && *budget) {
6613                         u32 frag_len = tp->dma_limit;
6614                         len -= tp->dma_limit;
6615
6616                         /* Avoid the 8-byte DMA problem */
6617                         if (len <= 8) {
6618                                 len += tp->dma_limit / 2;
6619                                 frag_len = tp->dma_limit / 2;
6620                         }
6621
6622                         tnapi->tx_buffers[*entry].fragmented = true;
6623
6624                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6625                                       frag_len, tmp_flag, mss, vlan);
6626                         *budget -= 1;
6627                         prvidx = *entry;
6628                         *entry = NEXT_TX(*entry);
6629
6630                         map += frag_len;
6631                 }
6632
6633                 if (len) {
6634                         if (*budget) {
6635                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6636                                               len, flags, mss, vlan);
6637                                 *budget -= 1;
6638                                 *entry = NEXT_TX(*entry);
6639                         } else {
6640                                 hwbug = true;
6641                                 tnapi->tx_buffers[prvidx].fragmented = false;
6642                         }
6643                 }
6644         } else {
6645                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6646                               len, flags, mss, vlan);
6647                 *entry = NEXT_TX(*entry);
6648         }
6649
6650         return hwbug;
6651 }
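/* Worked example of the dma_limit splitting above (illustrative):
 * with tp->dma_limit = 4096 and len = 8196, the loop first emits a
 * 4096-byte BD (4100 bytes left), then notices a second full chunk
 * would strand only 4 bytes, so it emits a half-limit 2048-byte BD
 * instead (2052 left), and the final BD carries 2052 bytes.  No BD
 * ends up 8 bytes or smaller, which is exactly the SHORT_DMA_BUG
 * condition the splitting avoids.
 */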
6652
6653 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6654 {
6655         int i;
6656         struct sk_buff *skb;
6657         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6658
6659         skb = txb->skb;
6660         txb->skb = NULL;
6661
6662         pci_unmap_single(tnapi->tp->pdev,
6663                          dma_unmap_addr(txb, mapping),
6664                          skb_headlen(skb),
6665                          PCI_DMA_TODEVICE);
6666
6667         while (txb->fragmented) {
6668                 txb->fragmented = false;
6669                 entry = NEXT_TX(entry);
6670                 txb = &tnapi->tx_buffers[entry];
6671         }
6672
6673         for (i = 0; i <= last; i++) {
6674                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6675
6676                 entry = NEXT_TX(entry);
6677                 txb = &tnapi->tx_buffers[entry];
6678
6679                 pci_unmap_page(tnapi->tp->pdev,
6680                                dma_unmap_addr(txb, mapping),
6681                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6682
6683                 while (txb->fragmented) {
6684                         txb->fragmented = false;
6685                         entry = NEXT_TX(entry);
6686                         txb = &tnapi->tx_buffers[entry];
6687                 }
6688         }
6689 }
6690
6691 /* Work around 4GB and 40-bit hardware DMA bugs. */
6692 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6693                                        struct sk_buff **pskb,
6694                                        u32 *entry, u32 *budget,
6695                                        u32 base_flags, u32 mss, u32 vlan)
6696 {
6697         struct tg3 *tp = tnapi->tp;
6698         struct sk_buff *new_skb, *skb = *pskb;
6699         dma_addr_t new_addr = 0;
6700         int ret = 0;
6701
6702         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6703                 new_skb = skb_copy(skb, GFP_ATOMIC);
6704         else {
6705                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6706
6707                 new_skb = skb_copy_expand(skb,
6708                                           skb_headroom(skb) + more_headroom,
6709                                           skb_tailroom(skb), GFP_ATOMIC);
6710         }
6711
6712         if (!new_skb) {
6713                 ret = -1;
6714         } else {
6715                 /* New SKB is guaranteed to be linear. */
6716                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6717                                           PCI_DMA_TODEVICE);
6718                 /* Make sure the mapping succeeded */
6719                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6720                         dev_kfree_skb(new_skb);
6721                         ret = -1;
6722                 } else {
6723                         u32 save_entry = *entry;
6724
6725                         base_flags |= TXD_FLAG_END;
6726
6727                         tnapi->tx_buffers[*entry].skb = new_skb;
6728                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6729                                            mapping, new_addr);
6730
6731                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6732                                             new_skb->len, base_flags,
6733                                             mss, vlan)) {
6734                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6735                                 dev_kfree_skb(new_skb);
6736                                 ret = -1;
6737                         }
6738                 }
6739         }
6740
6741         dev_kfree_skb(skb);
6742         *pskb = new_skb;
6743         return ret;
6744 }
6745
6746 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6747
6748 /* Use GSO to work around a rare TSO bug that may be triggered when the
6749  * TSO header is greater than 80 bytes.
6750  */
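/* In outline: skb_gso_segment() below resegments the oversized TSO
 * skb in software into MTU-sized skbs (TSO is masked out of the
 * feature flags passed in), and each resulting segment is fed back
 * through tg3_start_xmit() on the normal, non-TSO path.
 */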
6751 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6752 {
6753         struct sk_buff *segs, *nskb;
6754         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6755
6756         /* Estimate the number of fragments in the worst case */
6757         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6758                 netif_stop_queue(tp->dev);
6759
6760                 /* netif_tx_stop_queue() must be done before checking
6761                  * tx index in tg3_tx_avail() below, because in
6762                  * tg3_tx(), we update tx index before checking for
6763                  * netif_tx_queue_stopped().
6764                  */
6765                 smp_mb();
6766                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6767                         return NETDEV_TX_BUSY;
6768
6769                 netif_wake_queue(tp->dev);
6770         }
6771
6772         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6773         if (IS_ERR(segs))
6774                 goto tg3_tso_bug_end;
6775
6776         do {
6777                 nskb = segs;
6778                 segs = segs->next;
6779                 nskb->next = NULL;
6780                 tg3_start_xmit(nskb, tp->dev);
6781         } while (segs);
6782
6783 tg3_tso_bug_end:
6784         dev_kfree_skb(skb);
6785
6786         return NETDEV_TX_OK;
6787 }
6788
6789 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6790  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6791  */
6792 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6793 {
6794         struct tg3 *tp = netdev_priv(dev);
6795         u32 len, entry, base_flags, mss, vlan = 0;
6796         u32 budget;
6797         int i = -1, would_hit_hwbug;
6798         dma_addr_t mapping;
6799         struct tg3_napi *tnapi;
6800         struct netdev_queue *txq;
6801         unsigned int last;
6802
6803         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6804         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6805         if (tg3_flag(tp, ENABLE_TSS))
6806                 tnapi++;
6807
6808         budget = tg3_tx_avail(tnapi);
6809
6810         /* We are running in BH disabled context with netif_tx_lock
6811          * and TX reclaim runs via tp->napi.poll inside of a software
6812          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6813          * no IRQ context deadlocks to worry about either.  Rejoice!
6814          */
6815         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6816                 if (!netif_tx_queue_stopped(txq)) {
6817                         netif_tx_stop_queue(txq);
6818
6819                         /* This is a hard error, log it. */
6820                         netdev_err(dev,
6821                                    "BUG! Tx Ring full when queue awake!\n");
6822                 }
6823                 return NETDEV_TX_BUSY;
6824         }
6825
6826         entry = tnapi->tx_prod;
6827         base_flags = 0;
6828         if (skb->ip_summed == CHECKSUM_PARTIAL)
6829                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6830
6831         mss = skb_shinfo(skb)->gso_size;
6832         if (mss) {
6833                 struct iphdr *iph;
6834                 u32 tcp_opt_len, hdr_len;
6835
6836                 if (skb_header_cloned(skb) &&
6837                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6838                         goto drop;
6839
6840                 iph = ip_hdr(skb);
6841                 tcp_opt_len = tcp_optlen(skb);
6842
6843                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6844
6845                 if (!skb_is_gso_v6(skb)) {
6846                         iph->check = 0;
6847                         iph->tot_len = htons(mss + hdr_len);
6848                 }
6849
6850                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6851                     tg3_flag(tp, TSO_BUG))
6852                         return tg3_tso_bug(tp, skb);
6853
6854                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6855                                TXD_FLAG_CPU_POST_DMA);
6856
6857                 if (tg3_flag(tp, HW_TSO_1) ||
6858                     tg3_flag(tp, HW_TSO_2) ||
6859                     tg3_flag(tp, HW_TSO_3)) {
6860                         tcp_hdr(skb)->check = 0;
6861                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6862                 } else
6863                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6864                                                                  iph->daddr, 0,
6865                                                                  IPPROTO_TCP,
6866                                                                  0);
6867
6868                 if (tg3_flag(tp, HW_TSO_3)) {
6869                         mss |= (hdr_len & 0xc) << 12;
6870                         if (hdr_len & 0x10)
6871                                 base_flags |= 0x00000010;
6872                         base_flags |= (hdr_len & 0x3e0) << 5;
6873                 } else if (tg3_flag(tp, HW_TSO_2))
6874                         mss |= hdr_len << 9;
6875                 else if (tg3_flag(tp, HW_TSO_1) ||
6876                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6877                         if (tcp_opt_len || iph->ihl > 5) {
6878                                 int tsflags;
6879
6880                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6881                                 mss |= (tsflags << 11);
6882                         }
6883                 } else {
6884                         if (tcp_opt_len || iph->ihl > 5) {
6885                                 int tsflags;
6886
6887                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6888                                 base_flags |= tsflags << 12;
6889                         }
6890                 }
6891         }
6892
6893         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6894             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6895                 base_flags |= TXD_FLAG_JMB_PKT;
6896
6897         if (vlan_tx_tag_present(skb)) {
6898                 base_flags |= TXD_FLAG_VLAN;
6899                 vlan = vlan_tx_tag_get(skb);
6900         }
6901
6902         len = skb_headlen(skb);
6903
6904         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6905         if (pci_dma_mapping_error(tp->pdev, mapping))
6906                 goto drop;
6907
6908
6909         tnapi->tx_buffers[entry].skb = skb;
6910         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6911
6912         would_hit_hwbug = 0;
6913
6914         if (tg3_flag(tp, 5701_DMA_BUG))
6915                 would_hit_hwbug = 1;
6916
6917         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6918                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6919                             mss, vlan)) {
6920                 would_hit_hwbug = 1;
6921         } else if (skb_shinfo(skb)->nr_frags > 0) {
6922                 u32 tmp_mss = mss;
6923
6924                 if (!tg3_flag(tp, HW_TSO_1) &&
6925                     !tg3_flag(tp, HW_TSO_2) &&
6926                     !tg3_flag(tp, HW_TSO_3))
6927                         tmp_mss = 0;
6928
6929                 /* Now loop through additional data
6930                  * fragments, and queue them.
6931                  */
6932                 last = skb_shinfo(skb)->nr_frags - 1;
6933                 for (i = 0; i <= last; i++) {
6934                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6935
6936                         len = skb_frag_size(frag);
6937                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6938                                                    len, DMA_TO_DEVICE);
6939
6940                         tnapi->tx_buffers[entry].skb = NULL;
6941                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6942                                            mapping);
6943                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6944                                 goto dma_error;
6945
6946                         if (!budget ||
6947                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6948                                             len, base_flags |
6949                                             ((i == last) ? TXD_FLAG_END : 0),
6950                                             tmp_mss, vlan)) {
6951                                 would_hit_hwbug = 1;
6952                                 break;
6953                         }
6954                 }
6955         }
6956
6957         if (would_hit_hwbug) {
6958                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6959
6960                 /* If the workaround fails due to memory/mapping
6961                  * failure, silently drop this packet.
6962                  */
6963                 entry = tnapi->tx_prod;
6964                 budget = tg3_tx_avail(tnapi);
6965                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6966                                                 base_flags, mss, vlan))
6967                         goto drop_nofree;
6968         }
6969
6970         skb_tx_timestamp(skb);
6971         netdev_sent_queue(tp->dev, skb->len);
6972
6973         /* Packets are ready, update Tx producer idx local and on card. */
6974         tw32_tx_mbox(tnapi->prodmbox, entry);
6975
6976         tnapi->tx_prod = entry;
6977         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6978                 netif_tx_stop_queue(txq);
6979
6980                 /* netif_tx_stop_queue() must be done before checking
6981                  * tx index in tg3_tx_avail() below, because in
6982                  * tg3_tx(), we update tx index before checking for
6983                  * netif_tx_queue_stopped().
6984                  */
6985                 smp_mb();
6986                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6987                         netif_tx_wake_queue(txq);
6988         }
6989
6990         mmiowb();
6991         return NETDEV_TX_OK;
6992
6993 dma_error:
6994         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6995         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
6996 drop:
6997         dev_kfree_skb(skb);
6998 drop_nofree:
6999         tp->tx_dropped++;
7000         return NETDEV_TX_OK;
7001 }
7002
7003 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7004 {
7005         if (enable) {
7006                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7007                                   MAC_MODE_PORT_MODE_MASK);
7008
7009                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7010
7011                 if (!tg3_flag(tp, 5705_PLUS))
7012                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7013
7014                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7015                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7016                 else
7017                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7018         } else {
7019                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7020
7021                 if (tg3_flag(tp, 5705_PLUS) ||
7022                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7023                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7024                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7025         }
7026
7027         tw32(MAC_MODE, tp->mac_mode);
7028         udelay(40);
7029 }
7030
7031 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7032 {
7033         u32 val, bmcr, mac_mode, ptest = 0;
7034
7035         tg3_phy_toggle_apd(tp, false);
7036         tg3_phy_toggle_automdix(tp, 0);
7037
7038         if (extlpbk && tg3_phy_set_extloopbk(tp))
7039                 return -EIO;
7040
7041         bmcr = BMCR_FULLDPLX;
7042         switch (speed) {
7043         case SPEED_10:
7044                 break;
7045         case SPEED_100:
7046                 bmcr |= BMCR_SPEED100;
7047                 break;
7048         case SPEED_1000:
7049         default:
7050                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7051                         speed = SPEED_100;
7052                         bmcr |= BMCR_SPEED100;
7053                 } else {
7054                         speed = SPEED_1000;
7055                         bmcr |= BMCR_SPEED1000;
7056                 }
7057         }
7058
7059         if (extlpbk) {
7060                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7061                         tg3_readphy(tp, MII_CTRL1000, &val);
7062                         val |= CTL1000_AS_MASTER |
7063                                CTL1000_ENABLE_MASTER;
7064                         tg3_writephy(tp, MII_CTRL1000, val);
7065                 } else {
7066                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7067                                 MII_TG3_FET_PTEST_TRIM_2;
7068                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7069                 }
7070         } else
7071                 bmcr |= BMCR_LOOPBACK;
7072
7073         tg3_writephy(tp, MII_BMCR, bmcr);
7074
7075         /* The write needs to be flushed for the FETs */
7076         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7077                 tg3_readphy(tp, MII_BMCR, &bmcr);
7078
7079         udelay(40);
7080
7081         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7082             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7083                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7084                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7085                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7086
7087                 /* The write needs to be flushed for the AC131 */
7088                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7089         }
7090
7091         /* Reset to prevent losing 1st rx packet intermittently */
7092         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7093             tg3_flag(tp, 5780_CLASS)) {
7094                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7095                 udelay(10);
7096                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7097         }
7098
7099         mac_mode = tp->mac_mode &
7100                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7101         if (speed == SPEED_1000)
7102                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7103         else
7104                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7105
7106         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7107                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7108
7109                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7110                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7111                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7112                         mac_mode |= MAC_MODE_LINK_POLARITY;
7113
7114                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7115                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7116         }
7117
7118         tw32(MAC_MODE, mac_mode);
7119         udelay(40);
7120
7121         return 0;
7122 }
7123
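/* React to NETIF_F_LOOPBACK feature changes: enter internal MAC
 * loopback with carrier forced on, or leave it and force a fresh link
 * status check via tg3_setup_phy().
 */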
7124 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7125 {
7126         struct tg3 *tp = netdev_priv(dev);
7127
7128         if (features & NETIF_F_LOOPBACK) {
7129                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7130                         return;
7131
7132                 spin_lock_bh(&tp->lock);
7133                 tg3_mac_loopback(tp, true);
7134                 netif_carrier_on(tp->dev);
7135                 spin_unlock_bh(&tp->lock);
7136                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7137         } else {
7138                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7139                         return;
7140
7141                 spin_lock_bh(&tp->lock);
7142                 tg3_mac_loopback(tp, false);
7143                 /* Force link status check */
7144                 tg3_setup_phy(tp, 1);
7145                 spin_unlock_bh(&tp->lock);
7146                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7147         }
7148 }
7149
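/* 5780-class devices cannot use TSO together with a jumbo MTU, so
 * mask out all TSO feature bits in that configuration.
 */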
7150 static netdev_features_t tg3_fix_features(struct net_device *dev,
7151         netdev_features_t features)
7152 {
7153         struct tg3 *tp = netdev_priv(dev);
7154
7155         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7156                 features &= ~NETIF_F_ALL_TSO;
7157
7158         return features;
7159 }
7160
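/* ndo_set_features handler; loopback is currently the only feature
 * toggle that requires immediate hardware reconfiguration.
 */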
7161 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7162 {
7163         netdev_features_t changed = dev->features ^ features;
7164
7165         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7166                 tg3_set_loopback(dev, features);
7167
7168         return 0;
7169 }
7170
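/* Release all rx buffers still posted to a producer ring set.  For
 * the per-vector ring sets only the span between the consumer and
 * producer indices is walked; for the default set (napi[0]) every
 * ring entry is freed.
 */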
7171 static void tg3_rx_prodring_free(struct tg3 *tp,
7172                                  struct tg3_rx_prodring_set *tpr)
7173 {
7174         int i;
7175
7176         if (tpr != &tp->napi[0].prodring) {
7177                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7178                      i = (i + 1) & tp->rx_std_ring_mask)
7179                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7180                                         tp->rx_pkt_map_sz);
7181
7182                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7183                         for (i = tpr->rx_jmb_cons_idx;
7184                              i != tpr->rx_jmb_prod_idx;
7185                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7186                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7187                                                 TG3_RX_JMB_MAP_SZ);
7188                         }
7189                 }
7190
7191                 return;
7192         }
7193
7194         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7195                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7196                                 tp->rx_pkt_map_sz);
7197
7198         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7199                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7200                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7201                                         TG3_RX_JMB_MAP_SZ);
7202         }
7203 }
7204
7205 /* Initialize rx rings for packet processing.
7206  *
7207  * The chip has been shut down and the driver detached from
7208  * the networking stack, so no interrupts or new tx packets will
7209  * end up in the driver.  tp->{tx,}lock are held and thus
7210  * we may not sleep.
7211  */
7212 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7213                                  struct tg3_rx_prodring_set *tpr)
7214 {
7215         u32 i, rx_pkt_dma_sz;
7216
7217         tpr->rx_std_cons_idx = 0;
7218         tpr->rx_std_prod_idx = 0;
7219         tpr->rx_jmb_cons_idx = 0;
7220         tpr->rx_jmb_prod_idx = 0;
7221
7222         if (tpr != &tp->napi[0].prodring) {
7223                 memset(&tpr->rx_std_buffers[0], 0,
7224                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7225                 if (tpr->rx_jmb_buffers)
7226                         memset(&tpr->rx_jmb_buffers[0], 0,
7227                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7228                 goto done;
7229         }
7230
7231         /* Zero out all descriptors. */
7232         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7233
7234         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7235         if (tg3_flag(tp, 5780_CLASS) &&
7236             tp->dev->mtu > ETH_DATA_LEN)
7237                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7238         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7239
7240         /* Initialize invariants of the rings; we only set this
7241          * stuff once.  This works because the card does not
7242          * write into the rx buffer posting rings.
7243          */
7244         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7245                 struct tg3_rx_buffer_desc *rxd;
7246
7247                 rxd = &tpr->rx_std[i];
7248                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7249                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7250                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7251                                (i << RXD_OPAQUE_INDEX_SHIFT));
7252         }
7253
7254         /* Now allocate fresh SKBs for each rx ring. */
7255         for (i = 0; i < tp->rx_pending; i++) {
7256                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7257                         netdev_warn(tp->dev,
7258                                     "Using a smaller RX standard ring. Only "
7259                                     "%d out of %d buffers were allocated "
7260                                     "successfully\n", i, tp->rx_pending);
7261                         if (i == 0)
7262                                 goto initfail;
7263                         tp->rx_pending = i;
7264                         break;
7265                 }
7266         }
7267
7268         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7269                 goto done;
7270
7271         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7272
7273         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7274                 goto done;
7275
7276         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7277                 struct tg3_rx_buffer_desc *rxd;
7278
7279                 rxd = &tpr->rx_jmb[i].std;
7280                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7281                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7282                                   RXD_FLAG_JUMBO;
7283                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7284                        (i << RXD_OPAQUE_INDEX_SHIFT));
7285         }
7286
7287         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7288                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7289                         netdev_warn(tp->dev,
7290                                     "Using a smaller RX jumbo ring. Only %d "
7291                                     "out of %d buffers were allocated "
7292                                     "successfully\n", i, tp->rx_jumbo_pending);
7293                         if (i == 0)
7294                                 goto initfail;
7295                         tp->rx_jumbo_pending = i;
7296                         break;
7297                 }
7298         }
7299
7300 done:
7301         return 0;
7302
7303 initfail:
7304         tg3_rx_prodring_free(tp, tpr);
7305         return -ENOMEM;
7306 }
7307
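/* Free the buffer bookkeeping arrays and the coherent DMA rings
 * backing a producer ring set.  Safe on a partially initialized set;
 * each pointer is cleared so repeated calls are harmless.
 */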
7308 static void tg3_rx_prodring_fini(struct tg3 *tp,
7309                                  struct tg3_rx_prodring_set *tpr)
7310 {
7311         kfree(tpr->rx_std_buffers);
7312         tpr->rx_std_buffers = NULL;
7313         kfree(tpr->rx_jmb_buffers);
7314         tpr->rx_jmb_buffers = NULL;
7315         if (tpr->rx_std) {
7316                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7317                                   tpr->rx_std, tpr->rx_std_mapping);
7318                 tpr->rx_std = NULL;
7319         }
7320         if (tpr->rx_jmb) {
7321                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7322                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7323                 tpr->rx_jmb = NULL;
7324         }
7325 }
7326
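/* Allocate the buffer bookkeeping arrays and coherent DMA descriptor
 * rings for one producer ring set.  Jumbo resources are only needed
 * on jumbo-capable, non-5780-class devices.  Any partial allocation
 * is unwound through tg3_rx_prodring_fini().
 */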
7327 static int tg3_rx_prodring_init(struct tg3 *tp,
7328                                 struct tg3_rx_prodring_set *tpr)
7329 {
7330         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7331                                       GFP_KERNEL);
7332         if (!tpr->rx_std_buffers)
7333                 return -ENOMEM;
7334
7335         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7336                                          TG3_RX_STD_RING_BYTES(tp),
7337                                          &tpr->rx_std_mapping,
7338                                          GFP_KERNEL);
7339         if (!tpr->rx_std)
7340                 goto err_out;
7341
7342         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7343                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7344                                               GFP_KERNEL);
7345                 if (!tpr->rx_jmb_buffers)
7346                         goto err_out;
7347
7348                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7349                                                  TG3_RX_JMB_RING_BYTES(tp),
7350                                                  &tpr->rx_jmb_mapping,
7351                                                  GFP_KERNEL);
7352                 if (!tpr->rx_jmb)
7353                         goto err_out;
7354         }
7355
7356         return 0;
7357
7358 err_out:
7359         tg3_rx_prodring_fini(tp, tpr);
7360         return -ENOMEM;
7361 }
7362
7363 /* Free up pending packets in all rx/tx rings.
7364  *
7365  * The chip has been shut down and the driver detached from
7366  * the networking stack, so no interrupts or new tx packets will
7367  * end up in the driver.  tp->{tx,}lock is not held and we are not
7368  * in an interrupt context and thus may sleep.
7369  */
7370 static void tg3_free_rings(struct tg3 *tp)
7371 {
7372         int i, j;
7373
7374         for (j = 0; j < tp->irq_cnt; j++) {
7375                 struct tg3_napi *tnapi = &tp->napi[j];
7376
7377                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7378
7379                 if (!tnapi->tx_buffers)
7380                         continue;
7381
7382                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7383                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7384
7385                         if (!skb)
7386                                 continue;
7387
7388                         tg3_tx_skb_unmap(tnapi, i,
7389                                          skb_shinfo(skb)->nr_frags - 1);
7390
7391                         dev_kfree_skb_any(skb);
7392                 }
7393         }
7394         netdev_reset_queue(tp->dev);
7395 }
7396
7397 /* Initialize tx/rx rings for packet processing.
7398  *
7399  * The chip has been shut down and the driver detached from
7400  * the networking stack, so no interrupts or new tx packets will
7401  * end up in the driver.  tp->{tx,}lock are held and thus
7402  * we may not sleep.
7403  */
7404 static int tg3_init_rings(struct tg3 *tp)
7405 {
7406         int i;
7407
7408         /* Free up all the SKBs. */
7409         tg3_free_rings(tp);
7410
7411         for (i = 0; i < tp->irq_cnt; i++) {
7412                 struct tg3_napi *tnapi = &tp->napi[i];
7413
7414                 tnapi->last_tag = 0;
7415                 tnapi->last_irq_tag = 0;
7416                 tnapi->hw_status->status = 0;
7417                 tnapi->hw_status->status_tag = 0;
7418                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7419
7420                 tnapi->tx_prod = 0;
7421                 tnapi->tx_cons = 0;
7422                 if (tnapi->tx_ring)
7423                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7424
7425                 tnapi->rx_rcb_ptr = 0;
7426                 if (tnapi->rx_rcb)
7427                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7428
7429                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7430                         tg3_free_rings(tp);
7431                         return -ENOMEM;
7432                 }
7433         }
7434
7435         return 0;
7436 }
7437
7438 /*
7439  * Must not be invoked with interrupt sources disabled and
7440  * the hardware shut down.
7441  */
7442 static void tg3_free_consistent(struct tg3 *tp)
7443 {
7444         int i;
7445
7446         for (i = 0; i < tp->irq_cnt; i++) {
7447                 struct tg3_napi *tnapi = &tp->napi[i];
7448
7449                 if (tnapi->tx_ring) {
7450                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7451                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7452                         tnapi->tx_ring = NULL;
7453                 }
7454
7455                 kfree(tnapi->tx_buffers);
7456                 tnapi->tx_buffers = NULL;
7457
7458                 if (tnapi->rx_rcb) {
7459                         dma_free_coherent(&tp->pdev->dev,
7460                                           TG3_RX_RCB_RING_BYTES(tp),
7461                                           tnapi->rx_rcb,
7462                                           tnapi->rx_rcb_mapping);
7463                         tnapi->rx_rcb = NULL;
7464                 }
7465
7466                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7467
7468                 if (tnapi->hw_status) {
7469                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7470                                           tnapi->hw_status,
7471                                           tnapi->status_mapping);
7472                         tnapi->hw_status = NULL;
7473                 }
7474         }
7475
7476         if (tp->hw_stats) {
7477                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7478                                   tp->hw_stats, tp->stats_mapping);
7479                 tp->hw_stats = NULL;
7480         }
7481 }
7482
7483 /*
7484  * Must not be invoked with interrupt sources disabled and
7485  * the hardware shut down.  Can sleep.
7486  */
7487 static int tg3_alloc_consistent(struct tg3 *tp)
7488 {
7489         int i;
7490
7491         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7492                                           sizeof(struct tg3_hw_stats),
7493                                           &tp->stats_mapping,
7494                                           GFP_KERNEL);
7495         if (!tp->hw_stats)
7496                 goto err_out;
7497
7498         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7499
7500         for (i = 0; i < tp->irq_cnt; i++) {
7501                 struct tg3_napi *tnapi = &tp->napi[i];
7502                 struct tg3_hw_status *sblk;
7503
7504                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7505                                                       TG3_HW_STATUS_SIZE,
7506                                                       &tnapi->status_mapping,
7507                                                       GFP_KERNEL);
7508                 if (!tnapi->hw_status)
7509                         goto err_out;
7510
7511                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7512                 sblk = tnapi->hw_status;
7513
7514                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7515                         goto err_out;
7516
7517                 /* If multivector TSS is enabled, vector 0 does not handle
7518                  * tx interrupts.  Don't allocate any resources for it.
7519                  */
7520                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7521                     (i && tg3_flag(tp, ENABLE_TSS))) {
7522                         tnapi->tx_buffers = kzalloc(
7523                                                sizeof(struct tg3_tx_ring_info) *
7524                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7525                         if (!tnapi->tx_buffers)
7526                                 goto err_out;
7527
7528                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7529                                                             TG3_TX_RING_BYTES,
7530                                                         &tnapi->tx_desc_mapping,
7531                                                             GFP_KERNEL);
7532                         if (!tnapi->tx_ring)
7533                                 goto err_out;
7534                 }
7535
7536                 /*
7537                  * When RSS is enabled, the status block format changes
7538                  * slightly.  The "rx_jumbo_consumer", "reserved",
7539                  * and "rx_mini_consumer" members get mapped to the
7540                  * other three rx return ring producer indexes.
7541                  */
7542                 switch (i) {
7543                 default:
7544                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7545                         break;
7546                 case 2:
7547                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7548                         break;
7549                 case 3:
7550                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7551                         break;
7552                 case 4:
7553                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7554                         break;
7555                 }
7556
7557                 /*
7558                  * If multivector RSS is enabled, vector 0 does not handle
7559                  * rx or tx interrupts.  Don't allocate any resources for it.
7560                  */
7561                 if (!i && tg3_flag(tp, ENABLE_RSS))
7562                         continue;
7563
7564                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7565                                                    TG3_RX_RCB_RING_BYTES(tp),
7566                                                    &tnapi->rx_rcb_mapping,
7567                                                    GFP_KERNEL);
7568                 if (!tnapi->rx_rcb)
7569                         goto err_out;
7570
7571                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7572         }
7573
7574         return 0;
7575
7576 err_out:
7577         tg3_free_consistent(tp);
7578         return -ENOMEM;
7579 }
7580
7581 #define MAX_WAIT_CNT 1000
7582
7583 /* To stop a block, clear the enable bit and poll until it
7584  * clears.  tp->lock is held.
7585  */
7586 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7587 {
7588         unsigned int i;
7589         u32 val;
7590
7591         if (tg3_flag(tp, 5705_PLUS)) {
7592                 switch (ofs) {
7593                 case RCVLSC_MODE:
7594                 case DMAC_MODE:
7595                 case MBFREE_MODE:
7596                 case BUFMGR_MODE:
7597                 case MEMARB_MODE:
7598                         /* We can't enable/disable these bits on the
7599                          * 5705/5750, so just report success.
7600                          */
7601                         return 0;
7602
7603                 default:
7604                         break;
7605                 }
7606         }
7607
7608         val = tr32(ofs);
7609         val &= ~enable_bit;
7610         tw32_f(ofs, val);
7611
7612         for (i = 0; i < MAX_WAIT_CNT; i++) {
7613                 udelay(100);
7614                 val = tr32(ofs);
7615                 if ((val & enable_bit) == 0)
7616                         break;
7617         }
7618
7619         if (i == MAX_WAIT_CNT && !silent) {
7620                 dev_err(&tp->pdev->dev,
7621                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7622                         ofs, enable_bit);
7623                 return -ENODEV;
7624         }
7625
7626         return 0;
7627 }
7628
7629 /* tp->lock is held. */
7630 static int tg3_abort_hw(struct tg3 *tp, int silent)
7631 {
7632         int i, err;
7633
7634         tg3_disable_ints(tp);
7635
7636         tp->rx_mode &= ~RX_MODE_ENABLE;
7637         tw32_f(MAC_RX_MODE, tp->rx_mode);
7638         udelay(10);
7639
7640         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7641         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7642         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7643         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7644         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7645         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7646
7647         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7648         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7649         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7650         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7651         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7652         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7653         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7654
7655         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7656         tw32_f(MAC_MODE, tp->mac_mode);
7657         udelay(40);
7658
7659         tp->tx_mode &= ~TX_MODE_ENABLE;
7660         tw32_f(MAC_TX_MODE, tp->tx_mode);
7661
7662         for (i = 0; i < MAX_WAIT_CNT; i++) {
7663                 udelay(100);
7664                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7665                         break;
7666         }
7667         if (i >= MAX_WAIT_CNT) {
7668                 dev_err(&tp->pdev->dev,
7669                         "%s timed out, TX_MODE_ENABLE will not clear "
7670                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7671                 err |= -ENODEV;
7672         }
7673
7674         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7675         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7676         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7677
7678         tw32(FTQ_RESET, 0xffffffff);
7679         tw32(FTQ_RESET, 0x00000000);
7680
7681         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7682         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7683
7684         for (i = 0; i < tp->irq_cnt; i++) {
7685                 struct tg3_napi *tnapi = &tp->napi[i];
7686                 if (tnapi->hw_status)
7687                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7688         }
7689
7690         return err;
7691 }
7692
7693 /* Save PCI command register before chip reset */
7694 static void tg3_save_pci_state(struct tg3 *tp)
7695 {
7696         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7697 }
7698
7699 /* Restore PCI state after chip reset */
7700 static void tg3_restore_pci_state(struct tg3 *tp)
7701 {
7702         u32 val;
7703
7704         /* Re-enable indirect register accesses. */
7705         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7706                                tp->misc_host_ctrl);
7707
7708         /* Set MAX PCI retry to zero. */
7709         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7710         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7711             tg3_flag(tp, PCIX_MODE))
7712                 val |= PCISTATE_RETRY_SAME_DMA;
7713         /* Allow reads and writes to the APE register and memory space. */
7714         if (tg3_flag(tp, ENABLE_APE))
7715                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7716                        PCISTATE_ALLOW_APE_SHMEM_WR |
7717                        PCISTATE_ALLOW_APE_PSPACE_WR;
7718         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7719
7720         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7721
7722         if (!tg3_flag(tp, PCI_EXPRESS)) {
7723                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7724                                       tp->pci_cacheline_sz);
7725                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7726                                       tp->pci_lat_timer);
7727         }
7728
7729         /* Make sure PCI-X relaxed ordering bit is clear. */
7730         if (tg3_flag(tp, PCIX_MODE)) {
7731                 u16 pcix_cmd;
7732
7733                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7734                                      &pcix_cmd);
7735                 pcix_cmd &= ~PCI_X_CMD_ERO;
7736                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7737                                       pcix_cmd);
7738         }
7739
7740         if (tg3_flag(tp, 5780_CLASS)) {
7741
7742                 /* Chip reset on 5780 will reset MSI enable bit,
7743                  * so need to restore it.
7744                  */
7745                 if (tg3_flag(tp, USING_MSI)) {
7746                         u16 ctrl;
7747
7748                         pci_read_config_word(tp->pdev,
7749                                              tp->msi_cap + PCI_MSI_FLAGS,
7750                                              &ctrl);
7751                         pci_write_config_word(tp->pdev,
7752                                               tp->msi_cap + PCI_MSI_FLAGS,
7753                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7754                         val = tr32(MSGINT_MODE);
7755                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7756                 }
7757         }
7758 }
7759
7760 /* tp->lock is held. */
7761 static int tg3_chip_reset(struct tg3 *tp)
7762 {
7763         u32 val;
7764         void (*write_op)(struct tg3 *, u32, u32);
7765         int i, err;
7766
7767         tg3_nvram_lock(tp);
7768
7769         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7770
7771         /* No matching tg3_nvram_unlock() after this because
7772          * the chip reset below will undo the nvram lock.
7773          */
7774         tp->nvram_lock_cnt = 0;
7775
7776         /* GRC_MISC_CFG core clock reset will clear the memory
7777          * enable bit in PCI register 4 and the MSI enable bit
7778          * on some chips, so we save relevant registers here.
7779          */
7780         tg3_save_pci_state(tp);
7781
7782         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7783             tg3_flag(tp, 5755_PLUS))
7784                 tw32(GRC_FASTBOOT_PC, 0);
7785
7786         /*
7787          * We must avoid the readl() that normally takes place.
7788          * It locks machines, causes machine checks, and does other
7789          * fun things.  So, temporarily disable the 5701
7790          * hardware workaround, while we do the reset.
7791          */
7792         write_op = tp->write32;
7793         if (write_op == tg3_write_flush_reg32)
7794                 tp->write32 = tg3_write32;
7795
7796         /* Prevent the irq handler from reading or writing PCI registers
7797          * during chip reset when the memory enable bit in the PCI command
7798          * register may be cleared.  The chip does not generate interrupt
7799          * at this time, but the irq handler may still be called due to irq
7800          * sharing or irqpoll.
7801          */
7802         tg3_flag_set(tp, CHIP_RESETTING);
7803         for (i = 0; i < tp->irq_cnt; i++) {
7804                 struct tg3_napi *tnapi = &tp->napi[i];
7805                 if (tnapi->hw_status) {
7806                         tnapi->hw_status->status = 0;
7807                         tnapi->hw_status->status_tag = 0;
7808                 }
7809                 tnapi->last_tag = 0;
7810                 tnapi->last_irq_tag = 0;
7811         }
7812         smp_mb();
7813
7814         for (i = 0; i < tp->irq_cnt; i++)
7815                 synchronize_irq(tp->napi[i].irq_vec);
7816
7817         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7818                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7819                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7820         }
7821
7822         /* do the reset */
7823         val = GRC_MISC_CFG_CORECLK_RESET;
7824
7825         if (tg3_flag(tp, PCI_EXPRESS)) {
7826                 /* Force PCIe 1.0a mode */
7827                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7828                     !tg3_flag(tp, 57765_PLUS) &&
7829                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7830                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7831                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7832
7833                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7834                         tw32(GRC_MISC_CFG, (1 << 29));
7835                         val |= (1 << 29);
7836                 }
7837         }
7838
7839         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7840                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7841                 tw32(GRC_VCPU_EXT_CTRL,
7842                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7843         }
7844
7845         /* Manage gphy power for all CPMU-absent PCIe devices. */
7846         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7847                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7848
7849         tw32(GRC_MISC_CFG, val);
7850
7851         /* restore 5701 hardware bug workaround write method */
7852         tp->write32 = write_op;
7853
7854         /* Unfortunately, we have to delay before the PCI read back.
7855          * Some 575X chips will not even respond to a PCI cfg access
7856          * when the reset command is given to the chip.
7857          *
7858          * How do these hardware designers expect things to work
7859          * properly if the PCI write is posted for a long period
7860          * of time?  It is always necessary to have some method by
7861          * which a register read back can occur to push out the
7862          * write that performs the reset.
7863          *
7864          * For most tg3 variants the trick below was working.
7865          * Ho hum...
7866          */
7867         udelay(120);
7868
7869         /* Flush PCI posted writes.  The normal MMIO registers
7870          * are inaccessible at this time so this is the only
7871          * way to make this work reliably (actually, this is no longer
7872          * the case, see above).  I tried to use indirect
7873          * register read/write but this upset some 5701 variants.
7874          */
7875         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7876
7877         udelay(120);
7878
7879         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7880                 u16 val16;
7881
7882                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7883                         int i;
7884                         u32 cfg_val;
7885
7886                         /* Wait for link training to complete.  */
7887                         for (i = 0; i < 5000; i++)
7888                                 udelay(100);
7889
7890                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7891                         pci_write_config_dword(tp->pdev, 0xc4,
7892                                                cfg_val | (1 << 15));
7893                 }
7894
7895                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7896                 pci_read_config_word(tp->pdev,
7897                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7898                                      &val16);
7899                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7900                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7901                 /*
7902                  * Older PCIe devices only support the 128-byte
7903                  * MPS setting.  Enforce the restriction.
7904                  */
7905                 if (!tg3_flag(tp, CPMU_PRESENT))
7906                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7907                 pci_write_config_word(tp->pdev,
7908                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7909                                       val16);
7910
7911                 /* Clear error status */
7912                 pci_write_config_word(tp->pdev,
7913                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7914                                       PCI_EXP_DEVSTA_CED |
7915                                       PCI_EXP_DEVSTA_NFED |
7916                                       PCI_EXP_DEVSTA_FED |
7917                                       PCI_EXP_DEVSTA_URD);
7918         }
7919
7920         tg3_restore_pci_state(tp);
7921
7922         tg3_flag_clear(tp, CHIP_RESETTING);
7923         tg3_flag_clear(tp, ERROR_PROCESSED);
7924
7925         val = 0;
7926         if (tg3_flag(tp, 5780_CLASS))
7927                 val = tr32(MEMARB_MODE);
7928         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7929
7930         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7931                 tg3_stop_fw(tp);
7932                 tw32(0x5000, 0x400);
7933         }
7934
7935         tw32(GRC_MODE, tp->grc_mode);
7936
7937         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7938                 val = tr32(0xc4);
7939
7940                 tw32(0xc4, val | (1 << 15));
7941         }
7942
7943         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7944             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7945                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7946                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7947                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7948                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7949         }
7950
7951         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7952                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7953                 val = tp->mac_mode;
7954         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7955                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7956                 val = tp->mac_mode;
7957         } else
7958                 val = 0;
7959
7960         tw32_f(MAC_MODE, val);
7961         udelay(40);
7962
7963         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7964
7965         err = tg3_poll_fw(tp);
7966         if (err)
7967                 return err;
7968
7969         tg3_mdio_start(tp);
7970
7971         if (tg3_flag(tp, PCI_EXPRESS) &&
7972             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7973             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7974             !tg3_flag(tp, 57765_PLUS)) {
7975                 val = tr32(0x7c00);
7976
7977                 tw32(0x7c00, val | (1 << 25));
7978         }
7979
7980         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7981                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7982                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7983         }
7984
7985         /* Reprobe ASF enable state.  */
7986         tg3_flag_clear(tp, ENABLE_ASF);
7987         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7988         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7989         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7990                 u32 nic_cfg;
7991
7992                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7993                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7994                         tg3_flag_set(tp, ENABLE_ASF);
7995                         tp->last_event_jiffies = jiffies;
7996                         if (tg3_flag(tp, 5750_PLUS))
7997                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
7998                 }
7999         }
8000
8001         return 0;
8002 }
8003
8004 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8005 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8006
8007 /* tp->lock is held. */
8008 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8009 {
8010         int err;
8011
8012         tg3_stop_fw(tp);
8013
8014         tg3_write_sig_pre_reset(tp, kind);
8015
8016         tg3_abort_hw(tp, silent);
8017         err = tg3_chip_reset(tp);
8018
8019         __tg3_set_mac_addr(tp, 0);
8020
8021         tg3_write_sig_legacy(tp, kind);
8022         tg3_write_sig_post_reset(tp, kind);
8023
8024         if (tp->hw_stats) {
8025                 /* Save the stats across chip resets... */
8026                 tg3_get_nstats(tp, &tp->net_stats_prev);
8027                 tg3_get_estats(tp, &tp->estats_prev);
8028
8029                 /* And make sure the next sample is new data */
8030                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8031         }
8032
8033         if (err)
8034                 return err;
8035
8036         return 0;
8037 }
8038
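/* ndo_set_mac_address handler.  When ASF firmware owns MAC address
 * register 1 (it differs from address 0 and is nonzero), that
 * register is left untouched via skip_mac_1.
 */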
8039 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8040 {
8041         struct tg3 *tp = netdev_priv(dev);
8042         struct sockaddr *addr = p;
8043         int err = 0, skip_mac_1 = 0;
8044
8045         if (!is_valid_ether_addr(addr->sa_data))
8046                 return -EADDRNOTAVAIL;
8047
8048         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8049
8050         if (!netif_running(dev))
8051                 return 0;
8052
8053         if (tg3_flag(tp, ENABLE_ASF)) {
8054                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8055
8056                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8057                 addr0_low = tr32(MAC_ADDR_0_LOW);
8058                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8059                 addr1_low = tr32(MAC_ADDR_1_LOW);
8060
8061                 /* Skip MAC addr 1 if ASF is using it. */
8062                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8063                     !(addr1_high == 0 && addr1_low == 0))
8064                         skip_mac_1 = 1;
8065         }
8066         spin_lock_bh(&tp->lock);
8067         __tg3_set_mac_addr(tp, skip_mac_1);
8068         spin_unlock_bh(&tp->lock);
8069
8070         return err;
8071 }
8072
8073 /* tp->lock is held. */
8074 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8075                            dma_addr_t mapping, u32 maxlen_flags,
8076                            u32 nic_addr)
8077 {
8078         tg3_write_mem(tp,
8079                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8080                       ((u64) mapping >> 32));
8081         tg3_write_mem(tp,
8082                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8083                       ((u64) mapping & 0xffffffff));
8084         tg3_write_mem(tp,
8085                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8086                        maxlen_flags);
8087
8088         if (!tg3_flag(tp, 5705_PLUS))
8089                 tg3_write_mem(tp,
8090                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8091                               nic_addr);
8092 }
8093
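/* Program the host coalescing engine from the ethtool_coalesce
 * parameters.  With TSS/RSS enabled, vector 0's tx/rx settings are
 * zeroed and the per-vector registers, laid out 0x18 bytes apart
 * starting at the *_VEC1 offsets, are programmed instead.
 */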
8094 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8095 {
8096         int i;
8097
8098         if (!tg3_flag(tp, ENABLE_TSS)) {
8099                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8100                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8101                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8102         } else {
8103                 tw32(HOSTCC_TXCOL_TICKS, 0);
8104                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8105                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8106         }
8107
8108         if (!tg3_flag(tp, ENABLE_RSS)) {
8109                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8110                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8111                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8112         } else {
8113                 tw32(HOSTCC_RXCOL_TICKS, 0);
8114                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8115                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8116         }
8117
8118         if (!tg3_flag(tp, 5705_PLUS)) {
8119                 u32 val = ec->stats_block_coalesce_usecs;
8120
8121                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8122                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8123
8124                 if (!netif_carrier_ok(tp->dev))
8125                         val = 0;
8126
8127                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8128         }
8129
8130         for (i = 0; i < tp->irq_cnt - 1; i++) {
8131                 u32 reg;
8132
8133                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8134                 tw32(reg, ec->rx_coalesce_usecs);
8135                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8136                 tw32(reg, ec->rx_max_coalesced_frames);
8137                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8138                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8139
8140                 if (tg3_flag(tp, ENABLE_TSS)) {
8141                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8142                         tw32(reg, ec->tx_coalesce_usecs);
8143                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8144                         tw32(reg, ec->tx_max_coalesced_frames);
8145                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8146                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8147                 }
8148         }
8149
8150         for (; i < tp->irq_max - 1; i++) {
8151                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8152                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8153                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8154
8155                 if (tg3_flag(tp, ENABLE_TSS)) {
8156                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8157                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8158                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8159                 }
8160         }
8161 }
8162
8163 /* tp->lock is held. */
8164 static void tg3_rings_reset(struct tg3 *tp)
8165 {
8166         int i;
8167         u32 stblk, txrcb, rxrcb, limit;
8168         struct tg3_napi *tnapi = &tp->napi[0];
8169
8170         /* Disable all transmit rings but the first. */
8171         if (!tg3_flag(tp, 5705_PLUS))
8172                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8173         else if (tg3_flag(tp, 5717_PLUS))
8174                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8175         else if (tg3_flag(tp, 57765_CLASS))
8176                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8177         else
8178                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8179
8180         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8181              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8182                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8183                               BDINFO_FLAGS_DISABLED);
8184
8185
8186         /* Disable all receive return rings but the first. */
8187         if (tg3_flag(tp, 5717_PLUS))
8188                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8189         else if (!tg3_flag(tp, 5705_PLUS))
8190                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8191         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8192                  tg3_flag(tp, 57765_CLASS))
8193                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8194         else
8195                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8196
8197         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8198              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8199                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8200                               BDINFO_FLAGS_DISABLED);
8201
8202         /* Disable interrupts */
8203         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8204         tp->napi[0].chk_msi_cnt = 0;
8205         tp->napi[0].last_rx_cons = 0;
8206         tp->napi[0].last_tx_cons = 0;
8207
8208         /* Zero mailbox registers. */
8209         if (tg3_flag(tp, SUPPORT_MSIX)) {
8210                 for (i = 1; i < tp->irq_max; i++) {
8211                         tp->napi[i].tx_prod = 0;
8212                         tp->napi[i].tx_cons = 0;
8213                         if (tg3_flag(tp, ENABLE_TSS))
8214                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8215                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8216                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8217                         tp->napi[i].chk_msi_cnt = 0;
8218                         tp->napi[i].last_rx_cons = 0;
8219                         tp->napi[i].last_tx_cons = 0;
8220                 }
8221                 if (!tg3_flag(tp, ENABLE_TSS))
8222                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8223         } else {
8224                 tp->napi[0].tx_prod = 0;
8225                 tp->napi[0].tx_cons = 0;
8226                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8227                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8228         }
8229
8230         /* Make sure the NIC-based send BD rings are disabled. */
8231         if (!tg3_flag(tp, 5705_PLUS)) {
8232                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8233                 for (i = 0; i < 16; i++)
8234                         tw32_tx_mbox(mbox + i * 8, 0);
8235         }
8236
8237         txrcb = NIC_SRAM_SEND_RCB;
8238         rxrcb = NIC_SRAM_RCV_RET_RCB;
8239
8240         /* Clear status block in ram. */
8241         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8242
8243         /* Set status block DMA address */
8244         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8245              ((u64) tnapi->status_mapping >> 32));
8246         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8247              ((u64) tnapi->status_mapping & 0xffffffff));
8248
8249         if (tnapi->tx_ring) {
8250                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8251                                (TG3_TX_RING_SIZE <<
8252                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8253                                NIC_SRAM_TX_BUFFER_DESC);
8254                 txrcb += TG3_BDINFO_SIZE;
8255         }
8256
8257         if (tnapi->rx_rcb) {
8258                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8259                                (tp->rx_ret_ring_mask + 1) <<
8260                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8261                 rxrcb += TG3_BDINFO_SIZE;
8262         }
8263
8264         stblk = HOSTCC_STATBLCK_RING1;
8265
8266         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8267                 u64 mapping = (u64)tnapi->status_mapping;
8268                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8269                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8270
8271                 /* Clear status block in ram. */
8272                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8273
8274                 if (tnapi->tx_ring) {
8275                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8276                                        (TG3_TX_RING_SIZE <<
8277                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8278                                        NIC_SRAM_TX_BUFFER_DESC);
8279                         txrcb += TG3_BDINFO_SIZE;
8280                 }
8281
8282                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8283                                ((tp->rx_ret_ring_mask + 1) <<
8284                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8285
8286                 stblk += 8;
8287                 rxrcb += TG3_BDINFO_SIZE;
8288         }
8289 }
8290
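/* Choose the rx BD replenish thresholds.  The NIC-side threshold is
 * capped at half the chip family's on-chip BD cache size, the
 * host-side threshold at one eighth of the configured ring depth;
 * the smaller value is written to the hardware.
 */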
8291 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8292 {
8293         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8294
8295         if (!tg3_flag(tp, 5750_PLUS) ||
8296             tg3_flag(tp, 5780_CLASS) ||
8297             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8298             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8299             tg3_flag(tp, 57765_PLUS))
8300                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8301         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8302                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8303                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8304         else
8305                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8306
8307         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8308         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8309
8310         val = min(nic_rep_thresh, host_rep_thresh);
8311         tw32(RCVBDI_STD_THRESH, val);
8312
8313         if (tg3_flag(tp, 57765_PLUS))
8314                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8315
8316         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8317                 return;
8318
8319         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8320
8321         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8322
8323         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8324         tw32(RCVBDI_JUMBO_THRESH, val);
8325
8326         if (tg3_flag(tp, 57765_PLUS))
8327                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8328 }
8329
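/* Bitwise little-endian CRC-32 (polynomial 0xedb88320) over len
 * bytes, returned inverted.  Used by __tg3_set_rx_mode() below to
 * derive the multicast hash filter bits.
 */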
8330 static inline u32 calc_crc(unsigned char *buf, int len)
8331 {
8332         u32 reg;
8333         u32 tmp;
8334         int j, k;
8335
8336         reg = 0xffffffff;
8337
8338         for (j = 0; j < len; j++) {
8339                 reg ^= buf[j];
8340
8341                 for (k = 0; k < 8; k++) {
8342                         tmp = reg & 0x01;
8343
8344                         reg >>= 1;
8345
8346                         if (tmp)
8347                                 reg ^= 0xedb88320;
8348                 }
8349         }
8350
8351         return ~reg;
8352 }
8353
8354 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8355 {
8356         /* accept or reject all multicast frames */
8357         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8358         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8359         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8360         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8361 }
8362
8363 static void __tg3_set_rx_mode(struct net_device *dev)
8364 {
8365         struct tg3 *tp = netdev_priv(dev);
8366         u32 rx_mode;
8367
8368         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8369                                   RX_MODE_KEEP_VLAN_TAG);
8370
8371 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8372         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8373          * flag clear.
8374          */
8375         if (!tg3_flag(tp, ENABLE_ASF))
8376                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8377 #endif
8378
8379         if (dev->flags & IFF_PROMISC) {
8380                 /* Promiscuous mode. */
8381                 rx_mode |= RX_MODE_PROMISC;
8382         } else if (dev->flags & IFF_ALLMULTI) {
8383                 /* Accept all multicast. */
8384                 tg3_set_multi(tp, 1);
8385         } else if (netdev_mc_empty(dev)) {
8386                 /* Reject all multicast. */
8387                 tg3_set_multi(tp, 0);
8388         } else {
8389                 /* Accept one or more multicast(s). */
8390                 struct netdev_hw_addr *ha;
8391                 u32 mc_filter[4] = { 0, };
8392                 u32 regidx;
8393                 u32 bit;
8394                 u32 crc;
8395
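                /* The low 7 bits of ~crc select one of 128 filter
                 * bits: bits 6:5 pick the hash register, bits 4:0 the
                 * bit within it.
                 */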
8396                 netdev_for_each_mc_addr(ha, dev) {
8397                         crc = calc_crc(ha->addr, ETH_ALEN);
8398                         bit = ~crc & 0x7f;
8399                         regidx = (bit & 0x60) >> 5;
8400                         bit &= 0x1f;
8401                         mc_filter[regidx] |= (1 << bit);
8402                 }
8403
8404                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8405                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8406                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8407                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8408         }
8409
8410         if (rx_mode != tp->rx_mode) {
8411                 tp->rx_mode = rx_mode;
8412                 tw32_f(MAC_RX_MODE, rx_mode);
8413                 udelay(10);
8414         }
8415 }
8416
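/* Fill the RSS indirection table with the standard ethtool default
 * spread over the rx rings (irq_cnt - 1 of them, since vector 0
 * carries no rx work in multivector mode).
 */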
8417 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8418 {
8419         int i;
8420
8421         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8422                 tp->rss_ind_tbl[i] =
8423                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8424 }
8425
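/* With two or fewer interrupt vectors there is at most one RX return
 * ring, so the indirection table must be all zeros.  Otherwise, any
 * stale entry that references a ring beyond the current vector count
 * triggers a rebuild of the default table.
 */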
8426 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8427 {
8428         int i;
8429
8430         if (!tg3_flag(tp, SUPPORT_MSIX))
8431                 return;
8432
8433         if (tp->irq_cnt <= 2) {
8434                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8435                 return;
8436         }
8437
8438         /* Validate table against current IRQ count */
8439         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8440                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8441                         break;
8442         }
8443
8444         if (i != TG3_RSS_INDIR_TBL_SIZE)
8445                 tg3_rss_init_dflt_indir_tbl(tp);
8446 }
8447
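/* Pack the software indirection table into the hardware registers.
 * Each table entry is a 4-bit RX ring index; eight entries are packed
 * MSB-first into each 32-bit register, walking upward from
 * MAC_RSS_INDIR_TBL_0.
 */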
8448 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8449 {
8450         int i = 0;
8451         u32 reg = MAC_RSS_INDIR_TBL_0;
8452
8453         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8454                 u32 val = tp->rss_ind_tbl[i];
8455                 i++;
8456                 for (; i % 8; i++) {
8457                         val <<= 4;
8458                         val |= tp->rss_ind_tbl[i];
8459                 }
8460                 tw32(reg, val);
8461                 reg += 4;
8462         }
8463 }
8464
8465 /* tp->lock is held. */
8466 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8467 {
8468         u32 val, rdmac_mode;
8469         int i, err, limit;
8470         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8471
8472         tg3_disable_ints(tp);
8473
8474         tg3_stop_fw(tp);
8475
8476         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8477
8478         if (tg3_flag(tp, INIT_COMPLETE))
8479                 tg3_abort_hw(tp, 1);
8480
8481         /* Enable MAC control of LPI */
8482         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8483                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8484                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8485                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8486
8487                 tw32_f(TG3_CPMU_EEE_CTRL,
8488                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8489
8490                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8491                       TG3_CPMU_EEEMD_LPI_IN_TX |
8492                       TG3_CPMU_EEEMD_LPI_IN_RX |
8493                       TG3_CPMU_EEEMD_EEE_ENABLE;
8494
8495                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8496                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8497
8498                 if (tg3_flag(tp, ENABLE_APE))
8499                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8500
8501                 tw32_f(TG3_CPMU_EEE_MODE, val);
8502
8503                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8504                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8505                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8506
8507                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8508                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8509                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8510         }
8511
8512         if (reset_phy)
8513                 tg3_phy_reset(tp);
8514
8515         err = tg3_chip_reset(tp);
8516         if (err)
8517                 return err;
8518
8519         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8520
8521         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8522                 val = tr32(TG3_CPMU_CTRL);
8523                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8524                 tw32(TG3_CPMU_CTRL, val);
8525
8526                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8527                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8528                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8529                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8530
8531                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8532                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8533                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8534                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8535
8536                 val = tr32(TG3_CPMU_HST_ACC);
8537                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8538                 val |= CPMU_HST_ACC_MACCLK_6_25;
8539                 tw32(TG3_CPMU_HST_ACC, val);
8540         }
8541
8542         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8543                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8544                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8545                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8546                 tw32(PCIE_PWR_MGMT_THRESH, val);
8547
8548                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8549                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8550
8551                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8552
8553                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8554                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8555         }
8556
8557         if (tg3_flag(tp, L1PLLPD_EN)) {
8558                 u32 grc_mode = tr32(GRC_MODE);
8559
8560                 /* Access the lower 1K of PL PCIE block registers. */
8561                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8562                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8563
8564                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8565                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8566                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8567
8568                 tw32(GRC_MODE, grc_mode);
8569         }
8570
8571         if (tg3_flag(tp, 57765_CLASS)) {
8572                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8573                         u32 grc_mode = tr32(GRC_MODE);
8574
8575                         /* Access the lower 1K of PL PCIE block registers. */
8576                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8577                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8578
8579                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8580                                    TG3_PCIE_PL_LO_PHYCTL5);
8581                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8582                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8583
8584                         tw32(GRC_MODE, grc_mode);
8585                 }
8586
8587                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8588                         u32 grc_mode = tr32(GRC_MODE);
8589
8590                         /* Access the lower 1K of DL PCIE block registers. */
8591                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8592                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8593
8594                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8595                                    TG3_PCIE_DL_LO_FTSMAX);
8596                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8597                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8598                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8599
8600                         tw32(GRC_MODE, grc_mode);
8601                 }
8602
8603                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8604                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8605                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8606                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8607         }
8608
8609         /* This works around an issue with Athlon chipsets on
8610          * B3 tigon3 silicon.  This bit has no effect on any
8611          * other revision.  Do not set this bit on PCI Express
8612          * chips, and do not touch the clocks at all if the CPMU is present.
8613          */
8614         if (!tg3_flag(tp, CPMU_PRESENT)) {
8615                 if (!tg3_flag(tp, PCI_EXPRESS))
8616                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8617                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8618         }
8619
8620         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8621             tg3_flag(tp, PCIX_MODE)) {
8622                 val = tr32(TG3PCI_PCISTATE);
8623                 val |= PCISTATE_RETRY_SAME_DMA;
8624                 tw32(TG3PCI_PCISTATE, val);
8625         }
8626
8627         if (tg3_flag(tp, ENABLE_APE)) {
8628                 /* Allow reads and writes to the
8629                  * APE register and memory space.
8630                  */
8631                 val = tr32(TG3PCI_PCISTATE);
8632                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8633                        PCISTATE_ALLOW_APE_SHMEM_WR |
8634                        PCISTATE_ALLOW_APE_PSPACE_WR;
8635                 tw32(TG3PCI_PCISTATE, val);
8636         }
8637
8638         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8639                 /* Enable some hw fixes.  */
8640                 val = tr32(TG3PCI_MSI_DATA);
8641                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8642                 tw32(TG3PCI_MSI_DATA, val);
8643         }
8644
8645         /* Descriptor ring init may make accesses to the
8646          * NIC SRAM area to set up the TX descriptors, so we
8647          * can only do this after the hardware has been
8648          * successfully reset.
8649          */
8650         err = tg3_init_rings(tp);
8651         if (err)
8652                 return err;
8653
8654         if (tg3_flag(tp, 57765_PLUS)) {
8655                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8656                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8657                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8658                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8659                 if (!tg3_flag(tp, 57765_CLASS) &&
8660                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8661                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8662                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8663         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8664                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8665                 /* This value is determined during the probe-time DMA
8666                  * engine test, tg3_test_dma().
8667                  */
8668                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8669         }
8670
8671         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8672                           GRC_MODE_4X_NIC_SEND_RINGS |
8673                           GRC_MODE_NO_TX_PHDR_CSUM |
8674                           GRC_MODE_NO_RX_PHDR_CSUM);
8675         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8676
8677         /* Pseudo-header checksum is done by hardware logic and not
8678          * the offload processors, so make the chip do the pseudo-
8679          * header checksums on receive.  For transmit it is more
8680          * convenient to do the pseudo-header checksum in software
8681          * as Linux does that on transmit for us in all cases.
8682          */
8683         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8684
8685         tw32(GRC_MODE,
8686              tp->grc_mode |
8687              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8688
8689         /* Set up the timer prescaler register.  The clock is always 66MHz. */
8690         val = tr32(GRC_MISC_CFG);
8691         val &= ~0xff;
8692         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8693         tw32(GRC_MISC_CFG, val);
8694
8695         /* Initialize MBUF/DESC pool. */
8696         if (tg3_flag(tp, 5750_PLUS)) {
8697                 /* Do nothing.  */
8698         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8699                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8700                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8701                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8702                 else
8703                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8704                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8705                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8706         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8707                 int fw_len;
8708
8709                 fw_len = tp->fw_len;
8710                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8711                 tw32(BUFMGR_MB_POOL_ADDR,
8712                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8713                 tw32(BUFMGR_MB_POOL_SIZE,
8714                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8715         }
8716
8717         if (tp->dev->mtu <= ETH_DATA_LEN) {
8718                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8719                      tp->bufmgr_config.mbuf_read_dma_low_water);
8720                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8721                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8722                 tw32(BUFMGR_MB_HIGH_WATER,
8723                      tp->bufmgr_config.mbuf_high_water);
8724         } else {
8725                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8726                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8727                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8728                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8729                 tw32(BUFMGR_MB_HIGH_WATER,
8730                      tp->bufmgr_config.mbuf_high_water_jumbo);
8731         }
8732         tw32(BUFMGR_DMA_LOW_WATER,
8733              tp->bufmgr_config.dma_low_water);
8734         tw32(BUFMGR_DMA_HIGH_WATER,
8735              tp->bufmgr_config.dma_high_water);
8736
8737         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8738         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8739                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8740         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8741             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8742             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8743                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8744         tw32(BUFMGR_MODE, val);
8745         for (i = 0; i < 2000; i++) {
8746                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8747                         break;
8748                 udelay(10);
8749         }
8750         if (i >= 2000) {
8751                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8752                 return -ENODEV;
8753         }
8754
8755         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8756                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8757
8758         tg3_setup_rxbd_thresholds(tp);
8759
8760         /* Initialize TG3_BDINFO's at:
8761          *  RCVDBDI_STD_BD:     standard eth size rx ring
8762          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8763          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8764          *
8765          * like so:
8766          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8767          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8768          *                              ring attribute flags
8769          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8770          *
8771          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8772          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8773          *
8774          * The size of each ring is fixed in the firmware, but the location is
8775          * configurable.
8776          */
8777         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8778              ((u64) tpr->rx_std_mapping >> 32));
8779         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8780              ((u64) tpr->rx_std_mapping & 0xffffffff));
8781         if (!tg3_flag(tp, 5717_PLUS))
8782                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8783                      NIC_SRAM_RX_BUFFER_DESC);
8784
8785         /* Disable the mini ring */
8786         if (!tg3_flag(tp, 5705_PLUS))
8787                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8788                      BDINFO_FLAGS_DISABLED);
8789
8790         /* Program the jumbo buffer descriptor ring control
8791          * blocks on those devices that have them.
8792          */
8793         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8794             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8795
8796                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8797                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR +
8798                              TG3_64BIT_REG_HIGH, ((u64) tpr->rx_jmb_mapping >> 32));
8799                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR +
8800                              TG3_64BIT_REG_LOW, ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8801                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8802                               BDINFO_FLAGS_MAXLEN_SHIFT;
8803                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8804                              val | BDINFO_FLAGS_USE_EXT_RECV);
8805                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8806                             tg3_flag(tp, 57765_CLASS))
8807                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8808                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8809                 } else {
8810                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8811                              BDINFO_FLAGS_DISABLED);
8812                 }
8813
8814                 if (tg3_flag(tp, 57765_PLUS)) {
8815                         val = TG3_RX_STD_RING_SIZE(tp);
8816                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8817                         val |= (TG3_RX_STD_DMA_SZ << 2);
8818                 } else
8819                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8820         } else
8821                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8822
8823         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8824
8825         tpr->rx_std_prod_idx = tp->rx_pending;
8826         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8827
8828         tpr->rx_jmb_prod_idx =
8829                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8830         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8831
8832         tg3_rings_reset(tp);
8833
8834         /* Initialize MAC address and backoff seed. */
8835         __tg3_set_mac_addr(tp, 0);
8836
8837         /* MTU + Ethernet header + FCS + optional VLAN tag */
8838         tw32(MAC_RX_MTU_SIZE,
8839              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8840
8841         /* The slot time is changed by tg3_setup_phy if we
8842          * run at gigabit with half duplex.
8843          */
8844         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8845               (6 << TX_LENGTHS_IPG_SHIFT) |
8846               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8847
8848         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8849                 val |= tr32(MAC_TX_LENGTHS) &
8850                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8851                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8852
8853         tw32(MAC_TX_LENGTHS, val);
8854
8855         /* Receive rules. */
8856         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8857         tw32(RCVLPC_CONFIG, 0x0181);
8858
8859         /* Calculate the RDMAC_MODE setting early; we need it to determine
8860          * the RCVLPC_STATS_ENABLE mask.
8861          */
8862         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8863                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8864                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8865                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8866                       RDMAC_MODE_LNGREAD_ENAB);
8867
8868         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8869                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8870
8871         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8872             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8873             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8874                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8875                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8876                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8877
8878         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8879             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8880                 if (tg3_flag(tp, TSO_CAPABLE) &&
8881                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8882                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8883                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8884                            !tg3_flag(tp, IS_5788)) {
8885                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8886                 }
8887         }
8888
8889         if (tg3_flag(tp, PCI_EXPRESS))
8890                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8891
8892         if (tg3_flag(tp, HW_TSO_1) ||
8893             tg3_flag(tp, HW_TSO_2) ||
8894             tg3_flag(tp, HW_TSO_3))
8895                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8896
8897         if (tg3_flag(tp, 57765_PLUS) ||
8898             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8899             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8900                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8901
8902         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8903                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8904
8905         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8906             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8907             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8908             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8909             tg3_flag(tp, 57765_PLUS)) {
8910                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8911                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8912                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8913                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8914                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8915                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8916                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8917                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8918                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8919                 }
8920                 tw32(TG3_RDMA_RSRVCTRL_REG,
8921                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8922         }
8923
8924         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8925             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8926                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8927                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8928                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8929                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8930         }
8931
8932         /* Receive/send statistics. */
8933         if (tg3_flag(tp, 5750_PLUS)) {
8934                 val = tr32(RCVLPC_STATS_ENABLE);
8935                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8936                 tw32(RCVLPC_STATS_ENABLE, val);
8937         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8938                    tg3_flag(tp, TSO_CAPABLE)) {
8939                 val = tr32(RCVLPC_STATS_ENABLE);
8940                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8941                 tw32(RCVLPC_STATS_ENABLE, val);
8942         } else {
8943                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8944         }
8945         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8946         tw32(SNDDATAI_STATSENAB, 0xffffff);
8947         tw32(SNDDATAI_STATSCTRL,
8948              (SNDDATAI_SCTRL_ENABLE |
8949               SNDDATAI_SCTRL_FASTUPD));
8950
8951         /* Set up the host coalescing engine. */
8952         tw32(HOSTCC_MODE, 0);
8953         for (i = 0; i < 2000; i++) {
8954                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8955                         break;
8956                 udelay(10);
8957         }
8958
8959         __tg3_set_coalesce(tp, &tp->coal);
8960
8961         if (!tg3_flag(tp, 5705_PLUS)) {
8962                 /* Status/statistics block address.  See tg3_timer,
8963                  * the tg3_periodic_fetch_stats call there, and
8964                  * tg3_get_stats to see how this works for 5705/5750 chips.
8965                  */
8966                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8967                      ((u64) tp->stats_mapping >> 32));
8968                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8969                      ((u64) tp->stats_mapping & 0xffffffff));
8970                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8971
8972                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8973
8974                 /* Clear statistics and status block memory areas */
8975                 for (i = NIC_SRAM_STATS_BLK;
8976                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8977                      i += sizeof(u32)) {
8978                         tg3_write_mem(tp, i, 0);
8979                         udelay(40);
8980                 }
8981         }
8982
8983         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8984
8985         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8986         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8987         if (!tg3_flag(tp, 5705_PLUS))
8988                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8989
8990         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8991                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8992                 /* reset to prevent losing 1st rx packet intermittently */
8993                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8994                 udelay(10);
8995         }
8996
8997         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8998                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8999                         MAC_MODE_FHDE_ENABLE;
9000         if (tg3_flag(tp, ENABLE_APE))
9001                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9002         if (!tg3_flag(tp, 5705_PLUS) &&
9003             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9004             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9005                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9006         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9007         udelay(40);
9008
9009         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9010          * If TG3_FLAG_IS_NIC is zero, we should read the
9011          * register to preserve the GPIO settings for LOMs. The GPIOs,
9012          * whether used as inputs or outputs, are set by boot code after
9013          * reset.
9014          */
9015         if (!tg3_flag(tp, IS_NIC)) {
9016                 u32 gpio_mask;
9017
9018                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9019                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9020                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9021
9022                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9023                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9024                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9025
9026                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9027                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9028
9029                 tp->grc_local_ctrl &= ~gpio_mask;
9030                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9031
9032                 /* GPIO1 must be driven high for EEPROM write protect */
9033                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9034                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9035                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9036         }
9037         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9038         udelay(100);
9039
9040         if (tg3_flag(tp, USING_MSIX)) {
9041                 val = tr32(MSGINT_MODE);
9042                 val |= MSGINT_MODE_ENABLE;
9043                 if (tp->irq_cnt > 1)
9044                         val |= MSGINT_MODE_MULTIVEC_EN;
9045                 if (!tg3_flag(tp, 1SHOT_MSI))
9046                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9047                 tw32(MSGINT_MODE, val);
9048         }
9049
9050         if (!tg3_flag(tp, 5705_PLUS)) {
9051                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9052                 udelay(40);
9053         }
9054
9055         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9056                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9057                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9058                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9059                WDMAC_MODE_LNGREAD_ENAB);
9060
9061         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9062             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9063                 if (tg3_flag(tp, TSO_CAPABLE) &&
9064                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9065                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9066                         /* nothing */
9067                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9068                            !tg3_flag(tp, IS_5788)) {
9069                         val |= WDMAC_MODE_RX_ACCEL;
9070                 }
9071         }
9072
9073         /* Enable host coalescing bug fix */
9074         if (tg3_flag(tp, 5755_PLUS))
9075                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9076
9077         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9078                 val |= WDMAC_MODE_BURST_ALL_DATA;
9079
9080         tw32_f(WDMAC_MODE, val);
9081         udelay(40);
9082
9083         if (tg3_flag(tp, PCIX_MODE)) {
9084                 u16 pcix_cmd;
9085
9086                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9087                                      &pcix_cmd);
9088                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9089                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9090                         pcix_cmd |= PCI_X_CMD_READ_2K;
9091                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9092                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9093                         pcix_cmd |= PCI_X_CMD_READ_2K;
9094                 }
9095                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9096                                       pcix_cmd);
9097         }
9098
9099         tw32_f(RDMAC_MODE, rdmac_mode);
9100         udelay(40);
9101
9102         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9103         if (!tg3_flag(tp, 5705_PLUS))
9104                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9105
9106         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9107                 tw32(SNDDATAC_MODE,
9108                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9109         else
9110                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9111
9112         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9113         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9114         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9115         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9116                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9117         tw32(RCVDBDI_MODE, val);
9118         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9119         if (tg3_flag(tp, HW_TSO_1) ||
9120             tg3_flag(tp, HW_TSO_2) ||
9121             tg3_flag(tp, HW_TSO_3))
9122                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9123         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9124         if (tg3_flag(tp, ENABLE_TSS))
9125                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9126         tw32(SNDBDI_MODE, val);
9127         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9128
9129         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9130                 err = tg3_load_5701_a0_firmware_fix(tp);
9131                 if (err)
9132                         return err;
9133         }
9134
9135         if (tg3_flag(tp, TSO_CAPABLE)) {
9136                 err = tg3_load_tso_firmware(tp);
9137                 if (err)
9138                         return err;
9139         }
9140
9141         tp->tx_mode = TX_MODE_ENABLE;
9142
9143         if (tg3_flag(tp, 5755_PLUS) ||
9144             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9145                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9146
9147         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9148                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9149                 tp->tx_mode &= ~val;
9150                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9151         }
9152
9153         tw32_f(MAC_TX_MODE, tp->tx_mode);
9154         udelay(100);
9155
9156         if (tg3_flag(tp, ENABLE_RSS)) {
9157                 tg3_rss_write_indir_tbl(tp);
9158
9159                 /* Set up the "secret" hash key. */
9160                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9161                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9162                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9163                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9164                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9165                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9166                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9167                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9168                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9169                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9170         }
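        /* The ten key registers above hold the conventional 40-byte
         * RSS hash key.  Note that the key is a fixed constant, not
         * per-boot random material.
         */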
9171
9172         tp->rx_mode = RX_MODE_ENABLE;
9173         if (tg3_flag(tp, 5755_PLUS))
9174                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9175
9176         if (tg3_flag(tp, ENABLE_RSS))
9177                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9178                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9179                                RX_MODE_RSS_IPV6_HASH_EN |
9180                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9181                                RX_MODE_RSS_IPV4_HASH_EN |
9182                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9183
9184         tw32_f(MAC_RX_MODE, tp->rx_mode);
9185         udelay(10);
9186
9187         tw32(MAC_LED_CTRL, tp->led_ctrl);
9188
9189         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9190         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9191                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9192                 udelay(10);
9193         }
9194         tw32_f(MAC_RX_MODE, tp->rx_mode);
9195         udelay(10);
9196
9197         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9198                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9199                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9200                         /* Set drive transmission level to 1.2V, but only
9201                          * if the signal pre-emphasis bit is not set. */
9202                         val = tr32(MAC_SERDES_CFG);
9203                         val &= 0xfffff000;
9204                         val |= 0x880;
9205                         tw32(MAC_SERDES_CFG, val);
9206                 }
9207                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9208                         tw32(MAC_SERDES_CFG, 0x616000);
9209         }
9210
9211         /* Prevent chip from dropping frames when flow control
9212          * is enabled.
9213          */
9214         if (tg3_flag(tp, 57765_CLASS))
9215                 val = 1;
9216         else
9217                 val = 2;
9218         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9219
9220         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9221             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9222                 /* Use hardware link auto-negotiation */
9223                 tg3_flag_set(tp, HW_AUTONEG);
9224         }
9225
9226         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9227             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9228                 u32 tmp;
9229
9230                 tmp = tr32(SERDES_RX_CTRL);
9231                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9232                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9233                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9234                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9235         }
9236
9237         if (!tg3_flag(tp, USE_PHYLIB)) {
9238                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9239                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9240
9241                 err = tg3_setup_phy(tp, 0);
9242                 if (err)
9243                         return err;
9244
9245                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9246                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9247                         u32 tmp;
9248
9249                         /* Clear CRC stats. */
9250                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9251                                 tg3_writephy(tp, MII_TG3_TEST1,
9252                                              tmp | MII_TG3_TEST1_CRC_EN);
9253                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9254                         }
9255                 }
9256         }
9257
9258         __tg3_set_rx_mode(tp->dev);
9259
9260         /* Initialize receive rules. */
9261         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9262         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9263         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9264         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9265
9266         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9267                 limit = 8;
9268         else
9269                 limit = 16;
9270         if (tg3_flag(tp, ENABLE_ASF))
9271                 limit -= 4;
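        /* The cases below fall through deliberately: starting at the
         * first rule number above the limit, every higher rule/value
         * pair is cleared.  Rules 0 and 1 were programmed above, rules
         * 2 and 3 are intentionally left untouched, and with ASF
         * enabled the top four rules are presumably reserved for the
         * firmware, hence the "limit -= 4" above.
         */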
9272         switch (limit) {
9273         case 16:
9274                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9275         case 15:
9276                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9277         case 14:
9278                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9279         case 13:
9280                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9281         case 12:
9282                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9283         case 11:
9284                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9285         case 10:
9286                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9287         case 9:
9288                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9289         case 8:
9290                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9291         case 7:
9292                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9293         case 6:
9294                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9295         case 5:
9296                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9297         case 4:
9298                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9299         case 3:
9300                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9301         case 2:
9302         case 1:
9303
9304         default:
9305                 break;
9306         }
9307
9308         if (tg3_flag(tp, ENABLE_APE))
9309                 /* Write our heartbeat update interval to APE. */
9310                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9311                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9312
9313         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9314
9315         return 0;
9316 }
9317
9318 /* Called at device open time to get the chip ready for
9319  * packet processing.  Invoked with tp->lock held.
9320  */
9321 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9322 {
9323         tg3_switch_clocks(tp);
9324
9325         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9326
9327         return tg3_reset_hw(tp, reset_phy);
9328 }
9329
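/* Fold a 32-bit hardware statistics register into a 64-bit software
 * counter.  Unsigned wraparound detects the carry: if the updated low
 * word is smaller than the value just added, the addition overflowed
 * and the high word is incremented.
 */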
9330 #define TG3_STAT_ADD32(PSTAT, REG) \
9331 do {    u32 __val = tr32(REG); \
9332         (PSTAT)->low += __val; \
9333         if ((PSTAT)->low < __val) \
9334                 (PSTAT)->high += 1; \
9335 } while (0)
9336
9337 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9338 {
9339         struct tg3_hw_stats *sp = tp->hw_stats;
9340
9341         if (!netif_carrier_ok(tp->dev))
9342                 return;
9343
9344         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9345         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9346         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9347         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9348         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9349         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9350         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9351         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9352         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9353         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9354         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9355         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9356         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9357
9358         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9359         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9360         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9361         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9362         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9363         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9364         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9365         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9366         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9367         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9368         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9369         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9370         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9371         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9372
9373         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
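        /* 5717-class chips and the A0 steppings of the 5719/5720 do
         * not use the RCVLPC discard counter here, presumably because
         * it is not dependable on those parts.  Instead, the MBUF
         * low-watermark attention bit is sampled, counted as a single
         * discard, and re-armed by writing it back.
         */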
9374         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9375             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9376             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9377                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9378         } else {
9379                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9380                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9381                 if (val) {
9382                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9383                         sp->rx_discards.low += val;
9384                         if (sp->rx_discards.low < val)
9385                                 sp->rx_discards.high += 1;
9386                 }
9387                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9388         }
9389         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9390 }
9391
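/* Work around occasionally lost MSIs: if a vector has work pending but
 * its RX/TX consumer indices have not moved since the previous timer
 * tick, assume the interrupt was missed.  One grace tick is allowed
 * (chk_msi_cnt) before the handler is invoked by hand.
 */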
9392 static void tg3_chk_missed_msi(struct tg3 *tp)
9393 {
9394         u32 i;
9395
9396         for (i = 0; i < tp->irq_cnt; i++) {
9397                 struct tg3_napi *tnapi = &tp->napi[i];
9398
9399                 if (tg3_has_work(tnapi)) {
9400                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9401                             tnapi->last_tx_cons == tnapi->tx_cons) {
9402                                 if (tnapi->chk_msi_cnt < 1) {
9403                                         tnapi->chk_msi_cnt++;
9404                                         return;
9405                                 }
9406                                 tg3_msi(0, tnapi);
9407                         }
9408                 }
9409                 tnapi->chk_msi_cnt = 0;
9410                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9411                 tnapi->last_tx_cons = tnapi->tx_cons;
9412         }
9413 }
9414
9415 static void tg3_timer(unsigned long __opaque)
9416 {
9417         struct tg3 *tp = (struct tg3 *) __opaque;
9418
9419         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9420                 goto restart_timer;
9421
9422         spin_lock(&tp->lock);
9423
9424         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9425             tg3_flag(tp, 57765_CLASS))
9426                 tg3_chk_missed_msi(tp);
9427
9428         if (!tg3_flag(tp, TAGGED_STATUS)) {
9429                 /* All of this garbage is necessary because, when using
9430                  * non-tagged IRQ status, the mailbox/status_block protocol
9431                  * the chip uses with the CPU is race-prone.
9432                  */
9433                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9434                         tw32(GRC_LOCAL_CTRL,
9435                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9436                 } else {
9437                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9438                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9439                 }
9440
9441                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9442                         spin_unlock(&tp->lock);
9443                         tg3_reset_task_schedule(tp);
9444                         goto restart_timer;
9445                 }
9446         }
9447
9448         /* This part only runs once per second. */
9449         if (!--tp->timer_counter) {
9450                 if (tg3_flag(tp, 5705_PLUS))
9451                         tg3_periodic_fetch_stats(tp);
9452
9453                 if (tp->setlpicnt && !--tp->setlpicnt)
9454                         tg3_phy_eee_enable(tp);
9455
9456                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9457                         u32 mac_stat;
9458                         int phy_event;
9459
9460                         mac_stat = tr32(MAC_STATUS);
9461
9462                         phy_event = 0;
9463                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9464                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9465                                         phy_event = 1;
9466                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9467                                 phy_event = 1;
9468
9469                         if (phy_event)
9470                                 tg3_setup_phy(tp, 0);
9471                 } else if (tg3_flag(tp, POLL_SERDES)) {
9472                         u32 mac_stat = tr32(MAC_STATUS);
9473                         int need_setup = 0;
9474
9475                         if (netif_carrier_ok(tp->dev) &&
9476                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9477                                 need_setup = 1;
9478                         }
9479                         if (!netif_carrier_ok(tp->dev) &&
9480                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9481                                          MAC_STATUS_SIGNAL_DET))) {
9482                                 need_setup = 1;
9483                         }
9484                         if (need_setup) {
9485                                 if (!tp->serdes_counter) {
9486                                         tw32_f(MAC_MODE,
9487                                              (tp->mac_mode &
9488                                               ~MAC_MODE_PORT_MODE_MASK));
9489                                         udelay(40);
9490                                         tw32_f(MAC_MODE, tp->mac_mode);
9491                                         udelay(40);
9492                                 }
9493                                 tg3_setup_phy(tp, 0);
9494                         }
9495                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9496                            tg3_flag(tp, 5780_CLASS)) {
9497                         tg3_serdes_parallel_detect(tp);
9498                 }
9499
9500                 tp->timer_counter = tp->timer_multiplier;
9501         }
9502
9503         /* Heartbeat is only sent once every 2 seconds.
9504          *
9505          * The heartbeat is to tell the ASF firmware that the host
9506          * driver is still alive.  In the event that the OS crashes,
9507          * ASF needs to reset the hardware to free up the FIFO space
9508          * that may be filled with rx packets destined for the host.
9509          * If the FIFO is full, ASF will no longer function properly.
9510          *
9511          * Unintended resets have been reported on real-time kernels
9512          * where the timer doesn't run on time.  Netpoll will have the
9513          * same problem.
9514          *
9515          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9516          * to check the ring condition when the heartbeat is expiring
9517          * before doing the reset.  This will prevent most unintended
9518          * resets.
9519          */
9520         if (!--tp->asf_counter) {
9521                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9522                         tg3_wait_for_event_ack(tp);
9523
9524                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9525                                       FWCMD_NICDRV_ALIVE3);
9526                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9527                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9528                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9529
9530                         tg3_generate_fw_event(tp);
9531                 }
9532                 tp->asf_counter = tp->asf_multiplier;
9533         }
9534
9535         spin_unlock(&tp->lock);
9536
9537 restart_timer:
9538         tp->timer.expires = jiffies + tp->timer_offset;
9539         add_timer(&tp->timer);
9540 }
9541
9542 static void __devinit tg3_timer_init(struct tg3 *tp)
9543 {
9544         if (tg3_flag(tp, TAGGED_STATUS) &&
9545             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9546             !tg3_flag(tp, 57765_CLASS))
9547                 tp->timer_offset = HZ;
9548         else
9549                 tp->timer_offset = HZ / 10;
9550
9551         BUG_ON(tp->timer_offset > HZ);
9552
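        /* timer_counter/timer_multiplier divide the tick rate down to
         * one event per second; asf_counter/asf_multiplier do the same
         * for the TG3_FW_UPDATE_FREQ_SEC heartbeat period.
         */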
9553         tp->timer_multiplier = (HZ / tp->timer_offset);
9554         tp->asf_multiplier = (HZ / tp->timer_offset) *
9555                              TG3_FW_UPDATE_FREQ_SEC;
9556
9557         init_timer(&tp->timer);
9558         tp->timer.data = (unsigned long) tp;
9559         tp->timer.function = tg3_timer;
9560 }
9561
9562 static void tg3_timer_start(struct tg3 *tp)
9563 {
9564         tp->asf_counter   = tp->asf_multiplier;
9565         tp->timer_counter = tp->timer_multiplier;
9566
9567         tp->timer.expires = jiffies + tp->timer_offset;
9568         add_timer(&tp->timer);
9569 }
9570
9571 static void tg3_timer_stop(struct tg3 *tp)
9572 {
9573         del_timer_sync(&tp->timer);
9574 }
9575
9576 /* Restart hardware after configuration changes, self-test, etc.
9577  * Invoked with tp->lock held.
9578  */
9579 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9580         __releases(tp->lock)
9581         __acquires(tp->lock)
9582 {
9583         int err;
9584
9585         err = tg3_init_hw(tp, reset_phy);
9586         if (err) {
9587                 netdev_err(tp->dev,
9588                            "Failed to re-initialize device, aborting\n");
9589                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9590                 tg3_full_unlock(tp);
9591                 tg3_timer_stop(tp);
9592                 tp->irq_sync = 0;
9593                 tg3_napi_enable(tp);
9594                 dev_close(tp->dev);
9595                 tg3_full_lock(tp, 0);
9596         }
9597         return err;
9598 }
9599
9600 static void tg3_reset_task(struct work_struct *work)
9601 {
9602         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9603         int err;
9604
9605         tg3_full_lock(tp, 0);
9606
9607         if (!netif_running(tp->dev)) {
9608                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9609                 tg3_full_unlock(tp);
9610                 return;
9611         }
9612
9613         tg3_full_unlock(tp);
9614
9615         tg3_phy_stop(tp);
9616
9617         tg3_netif_stop(tp);
9618
9619         tg3_full_lock(tp, 1);
9620
9621         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9622                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9623                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9624                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9625                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9626         }
9627
9628         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9629         err = tg3_init_hw(tp, 1);
9630         if (err)
9631                 goto out;
9632
9633         tg3_netif_start(tp);
9634
9635 out:
9636         tg3_full_unlock(tp);
9637
9638         if (!err)
9639                 tg3_phy_start(tp);
9640
9641         tg3_flag_clear(tp, RESET_TASK_PENDING);
9642 }
9643
9644 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9645 {
9646         irq_handler_t fn;
9647         unsigned long flags;
9648         char *name;
9649         struct tg3_napi *tnapi = &tp->napi[irq_num];
9650
9651         if (tp->irq_cnt == 1)
9652                 name = tp->dev->name;
9653         else {
9654                 name = &tnapi->irq_lbl[0];
9655                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9656                 name[IFNAMSIZ-1] = 0;
9657         }
9658
9659         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9660                 fn = tg3_msi;
9661                 if (tg3_flag(tp, 1SHOT_MSI))
9662                         fn = tg3_msi_1shot;
9663                 flags = 0;
9664         } else {
9665                 fn = tg3_interrupt;
9666                 if (tg3_flag(tp, TAGGED_STATUS))
9667                         fn = tg3_interrupt_tagged;
9668                 flags = IRQF_SHARED;
9669         }
9670
9671         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9672 }
9673
9674 static int tg3_test_interrupt(struct tg3 *tp)
9675 {
9676         struct tg3_napi *tnapi = &tp->napi[0];
9677         struct net_device *dev = tp->dev;
9678         int err, i, intr_ok = 0;
9679         u32 val;
9680
9681         if (!netif_running(dev))
9682                 return -ENODEV;
9683
9684         tg3_disable_ints(tp);
9685
9686         free_irq(tnapi->irq_vec, tnapi);
9687
9688         /*
9689          * Turn off MSI one-shot mode.  Otherwise this test has no
9690          * observable way to know whether the interrupt was delivered.
9691          */
9692         if (tg3_flag(tp, 57765_PLUS)) {
9693                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9694                 tw32(MSGINT_MODE, val);
9695         }
9696
9697         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9698                           IRQF_SHARED, dev->name, tnapi);
9699         if (err)
9700                 return err;
9701
9702         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9703         tg3_enable_ints(tp);
9704
9705         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9706                tnapi->coal_now);
9707
9708         for (i = 0; i < 5; i++) {
9709                 u32 int_mbox, misc_host_ctrl;
9710
9711                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9712                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9713
9714                 if ((int_mbox != 0) ||
9715                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9716                         intr_ok = 1;
9717                         break;
9718                 }
9719
9720                 if (tg3_flag(tp, 57765_PLUS) &&
9721                     tnapi->hw_status->status_tag != tnapi->last_tag)
9722                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9723
9724                 msleep(10);
9725         }
9726
9727         tg3_disable_ints(tp);
9728
9729         free_irq(tnapi->irq_vec, tnapi);
9730
9731         err = tg3_request_irq(tp, 0);
9732
9733         if (err)
9734                 return err;
9735
9736         if (intr_ok) {
9737                 /* Reenable MSI one shot mode. */
9738                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9739                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9740                         tw32(MSGINT_MODE, val);
9741                 }
9742                 return 0;
9743         }
9744
9745         return -EIO;
9746 }
9747
9748 /* Returns 0 if the MSI test succeeds, or if it fails but INTx mode
9749  * is successfully restored.
9750  */
9751 static int tg3_test_msi(struct tg3 *tp)
9752 {
9753         int err;
9754         u16 pci_cmd;
9755
9756         if (!tg3_flag(tp, USING_MSI))
9757                 return 0;
9758
9759         /* Turn off SERR reporting in case MSI terminates with Master
9760          * Abort.
9761          */
9762         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9763         pci_write_config_word(tp->pdev, PCI_COMMAND,
9764                               pci_cmd & ~PCI_COMMAND_SERR);
9765
9766         err = tg3_test_interrupt(tp);
9767
9768         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9769
9770         if (!err)
9771                 return 0;
9772
9773         /* other failures */
9774         if (err != -EIO)
9775                 return err;
9776
9777         /* MSI test failed, go back to INTx mode */
9778         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9779                     "to INTx mode. Please report this failure to the PCI "
9780                     "maintainer and include system chipset information\n");
9781
9782         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9783
9784         pci_disable_msi(tp->pdev);
9785
9786         tg3_flag_clear(tp, USING_MSI);
9787         tp->napi[0].irq_vec = tp->pdev->irq;
9788
9789         err = tg3_request_irq(tp, 0);
9790         if (err)
9791                 return err;
9792
9793         /* Need to reset the chip because the MSI cycle may have terminated
9794          * with Master Abort.
9795          */
9796         tg3_full_lock(tp, 1);
9797
9798         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9799         err = tg3_init_hw(tp, 1);
9800
9801         tg3_full_unlock(tp);
9802
9803         if (err)
9804                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9805
9806         return err;
9807 }
9808
9809 static int tg3_request_firmware(struct tg3 *tp)
9810 {
9811         const __be32 *fw_data;
9812
9813         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9814                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9815                            tp->fw_needed);
9816                 return -ENOENT;
9817         }
9818
9819         fw_data = (void *)tp->fw->data;
9820
9821         /* Firmware blob starts with version numbers, followed by
9822          * start address and _full_ length including BSS sections
9823          * (which must be longer than the actual data, of course).
9824          */
9825
9826         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9827         if (tp->fw_len < (tp->fw->size - 12)) {
9828                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9829                            tp->fw_len, tp->fw_needed);
9830                 release_firmware(tp->fw);
9831                 tp->fw = NULL;
9832                 return -EINVAL;
9833         }
9834
9835         /* We no longer need to request firmware; we have it now. */
9836         tp->fw_needed = NULL;
9837         return 0;
9838 }
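
/* A minimal sketch of the blob header implied by tg3_request_firmware()
 * above: three big-endian words (12 bytes, matching the
 * "tp->fw->size - 12" check) followed by the image data.  The struct
 * and field names here are hypothetical; the driver itself indexes the
 * raw __be32 array instead.
 */
struct tg3_fw_hdr_sketch {
        __be32 version;         /* version numbers */
        __be32 base_addr;       /* start (load) address */
        __be32 len;             /* _full_ length, including BSS */
        /* firmware text/data follows */
};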
9839
9840 static bool tg3_enable_msix(struct tg3 *tp)
9841 {
9842         int i, rc;
9843         struct msix_entry msix_ent[tp->irq_max];
9844
9845         tp->irq_cnt = num_online_cpus();
9846         if (tp->irq_cnt > 1) {
9847                 /* We want as many rx rings enabled as there are cpus.
9848                  * In multiqueue MSI-X mode, the first MSI-X vector
9849                  * only deals with link interrupts, etc, so we add
9850                  * one to the number of vectors we are requesting.
9851                  */
9852                 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9853         }
9854
9855         for (i = 0; i < tp->irq_max; i++) {
9856                 msix_ent[i].entry  = i;
9857                 msix_ent[i].vector = 0;
9858         }
9859
9860         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9861         if (rc < 0) {
9862                 return false;
9863         } else if (rc != 0) {
9864                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9865                         return false;
9866                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9867                               tp->irq_cnt, rc);
9868                 tp->irq_cnt = rc;
9869         }
9870
9871         for (i = 0; i < tp->irq_max; i++)
9872                 tp->napi[i].irq_vec = msix_ent[i].vector;
9873
9874         netif_set_real_num_tx_queues(tp->dev, 1);
9875         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9876         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9877                 pci_disable_msix(tp->pdev);
9878                 return false;
9879         }
9880
9881         if (tp->irq_cnt > 1) {
9882                 tg3_flag_set(tp, ENABLE_RSS);
9883
9884                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9885                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9886                         tg3_flag_set(tp, ENABLE_TSS);
9887                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9888                 }
9889         }
9890
9891         return true;
9892 }
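
/* Worked example of the vector accounting in tg3_enable_msix() (a
 * sketch, assuming 4 online cpus and tp->irq_max >= 5): irq_cnt
 * becomes 4 + 1 = 5, i.e. vector 0 for link/misc interrupts plus four
 * rx vectors, so the real rx queue count is 5 - 1 = 4.  On a 5719 or
 * 5720 with TSS enabled the tx queue count likewise becomes
 * irq_cnt - 1 = 4.
 */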
9893
9894 static void tg3_ints_init(struct tg3 *tp)
9895 {
9896         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9897             !tg3_flag(tp, TAGGED_STATUS)) {
9898                 /* All MSI-supporting chips should support tagged
9899                  * status; warn and fall back to INTx if not.
9900                  */
9901                 netdev_warn(tp->dev,
9902                             "MSI without TAGGED_STATUS? Not using MSI\n");
9903                 goto defcfg;
9904         }
9905
9906         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9907                 tg3_flag_set(tp, USING_MSIX);
9908         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9909                 tg3_flag_set(tp, USING_MSI);
9910
9911         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9912                 u32 msi_mode = tr32(MSGINT_MODE);
9913                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9914                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9915                 if (!tg3_flag(tp, 1SHOT_MSI))
9916                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9917                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9918         }
9919 defcfg:
9920         if (!tg3_flag(tp, USING_MSIX)) {
9921                 tp->irq_cnt = 1;
9922                 tp->napi[0].irq_vec = tp->pdev->irq;
9923                 netif_set_real_num_tx_queues(tp->dev, 1);
9924                 netif_set_real_num_rx_queues(tp->dev, 1);
9925         }
9926 }
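
/* Summary of the fallback ladder in tg3_ints_init() above: MSI-X with
 * one link vector plus per-cpu rx vectors is preferred, then
 * single-vector MSI, then legacy INTx; every non-MSI-X outcome falls
 * through defcfg and collapses to irq_cnt = 1 on tp->pdev->irq.
 */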
9927
9928 static void tg3_ints_fini(struct tg3 *tp)
9929 {
9930         if (tg3_flag(tp, USING_MSIX))
9931                 pci_disable_msix(tp->pdev);
9932         else if (tg3_flag(tp, USING_MSI))
9933                 pci_disable_msi(tp->pdev);
9934         tg3_flag_clear(tp, USING_MSI);
9935         tg3_flag_clear(tp, USING_MSIX);
9936         tg3_flag_clear(tp, ENABLE_RSS);
9937         tg3_flag_clear(tp, ENABLE_TSS);
9938 }
9939
9940 static int tg3_open(struct net_device *dev)
9941 {
9942         struct tg3 *tp = netdev_priv(dev);
9943         int i, err;
9944
9945         if (tp->fw_needed) {
9946                 err = tg3_request_firmware(tp);
9947                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9948                         if (err)
9949                                 return err;
9950                 } else if (err) {
9951                         netdev_warn(tp->dev, "TSO capability disabled\n");
9952                         tg3_flag_clear(tp, TSO_CAPABLE);
9953                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9954                         netdev_notice(tp->dev, "TSO capability restored\n");
9955                         tg3_flag_set(tp, TSO_CAPABLE);
9956                 }
9957         }
9958
9959         netif_carrier_off(tp->dev);
9960
9961         err = tg3_power_up(tp);
9962         if (err)
9963                 return err;
9964
9965         tg3_full_lock(tp, 0);
9966
9967         tg3_disable_ints(tp);
9968         tg3_flag_clear(tp, INIT_COMPLETE);
9969
9970         tg3_full_unlock(tp);
9971
9972         /*
9973          * Set up interrupts first so we know how
9974          * many NAPI resources to allocate.
9975          */
9976         tg3_ints_init(tp);
9977
9978         tg3_rss_check_indir_tbl(tp);
9979
9980         /* The placement of this call is tied
9981          * to the setup and use of Host TX descriptors.
9982          */
9983         err = tg3_alloc_consistent(tp);
9984         if (err)
9985                 goto err_out1;
9986
9987         tg3_napi_init(tp);
9988
9989         tg3_napi_enable(tp);
9990
9991         for (i = 0; i < tp->irq_cnt; i++) {
9992                 struct tg3_napi *tnapi = &tp->napi[i];
9993                 err = tg3_request_irq(tp, i);
9994                 if (err) {
9995                         for (i--; i >= 0; i--) {
9996                                 tnapi = &tp->napi[i];
9997                                 free_irq(tnapi->irq_vec, tnapi);
9998                         }
9999                         goto err_out2;
10000                 }
10001         }
10002
10003         tg3_full_lock(tp, 0);
10004
10005         err = tg3_init_hw(tp, 1);
10006         if (err) {
10007                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10008                 tg3_free_rings(tp);
10009         }
10010
10011         tg3_full_unlock(tp);
10012
10013         if (err)
10014                 goto err_out3;
10015
10016         if (tg3_flag(tp, USING_MSI)) {
10017                 err = tg3_test_msi(tp);
10018
10019                 if (err) {
10020                         tg3_full_lock(tp, 0);
10021                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10022                         tg3_free_rings(tp);
10023                         tg3_full_unlock(tp);
10024
10025                         goto err_out2;
10026                 }
10027
10028                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10029                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10030
10031                         tw32(PCIE_TRANSACTION_CFG,
10032                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10033                 }
10034         }
10035
10036         tg3_phy_start(tp);
10037
10038         tg3_full_lock(tp, 0);
10039
10040         tg3_timer_start(tp);
10041         tg3_flag_set(tp, INIT_COMPLETE);
10042         tg3_enable_ints(tp);
10043
10044         tg3_full_unlock(tp);
10045
10046         netif_tx_start_all_queues(dev);
10047
10048         /*
10049          * Restore the loopback feature if it was turned on while the
10050          * device was down; make sure it is configured properly now.
10051          */
10052         if (dev->features & NETIF_F_LOOPBACK)
10053                 tg3_set_loopback(dev, dev->features);
10054
10055         return 0;
10056
10057 err_out3:
10058         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10059                 struct tg3_napi *tnapi = &tp->napi[i];
10060                 free_irq(tnapi->irq_vec, tnapi);
10061         }
10062
10063 err_out2:
10064         tg3_napi_disable(tp);
10065         tg3_napi_fini(tp);
10066         tg3_free_consistent(tp);
10067
10068 err_out1:
10069         tg3_ints_fini(tp);
10070         tg3_frob_aux_power(tp, false);
10071         pci_set_power_state(tp->pdev, PCI_D3hot);
10072         return err;
10073 }
10074
10075 static int tg3_close(struct net_device *dev)
10076 {
10077         int i;
10078         struct tg3 *tp = netdev_priv(dev);
10079
10080         tg3_napi_disable(tp);
10081         tg3_reset_task_cancel(tp);
10082
10083         netif_tx_stop_all_queues(dev);
10084
10085         tg3_timer_stop(tp);
10086
10087         tg3_phy_stop(tp);
10088
10089         tg3_full_lock(tp, 1);
10090
10091         tg3_disable_ints(tp);
10092
10093         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10094         tg3_free_rings(tp);
10095         tg3_flag_clear(tp, INIT_COMPLETE);
10096
10097         tg3_full_unlock(tp);
10098
10099         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10100                 struct tg3_napi *tnapi = &tp->napi[i];
10101                 free_irq(tnapi->irq_vec, tnapi);
10102         }
10103
10104         tg3_ints_fini(tp);
10105
10106         /* Clear stats across close / open calls */
10107         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10108         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10109
10110         tg3_napi_fini(tp);
10111
10112         tg3_free_consistent(tp);
10113
10114         tg3_power_down(tp);
10115
10116         netif_carrier_off(tp->dev);
10117
10118         return 0;
10119 }
10120
10121 static inline u64 get_stat64(tg3_stat64_t *val)
10122 {
10123        return ((u64)val->high << 32) | ((u64)val->low);
10124 }
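
/* Example (a sketch): with val->high = 0x00000001 and
 * val->low = 0x00000002, get_stat64() returns 0x0000000100000002.
 */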
10125
10126 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10127 {
10128         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10129
10130         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10131             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10132              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10133                 u32 val;
10134
10135                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10136                         tg3_writephy(tp, MII_TG3_TEST1,
10137                                      val | MII_TG3_TEST1_CRC_EN);
10138                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10139                 } else
10140                         val = 0;
10141
10142                 tp->phy_crc_errors += val;
10143
10144                 return tp->phy_crc_errors;
10145         }
10146
10147         return get_stat64(&hw_stats->rx_fcs_errors);
10148 }
10149
10150 #define ESTAT_ADD(member) \
10151         estats->member =        old_estats->member + \
10152                                 get_stat64(&hw_stats->member)
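
/* For instance, ESTAT_ADD(rx_octets); expands to
 *
 *      estats->rx_octets = old_estats->rx_octets +
 *                          get_stat64(&hw_stats->rx_octets);
 *
 * i.e. each ethtool counter is the pre-reset total carried in
 * estats_prev plus the live hardware counter.
 */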
10153
10154 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10155 {
10156         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10157         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10158
10159         ESTAT_ADD(rx_octets);
10160         ESTAT_ADD(rx_fragments);
10161         ESTAT_ADD(rx_ucast_packets);
10162         ESTAT_ADD(rx_mcast_packets);
10163         ESTAT_ADD(rx_bcast_packets);
10164         ESTAT_ADD(rx_fcs_errors);
10165         ESTAT_ADD(rx_align_errors);
10166         ESTAT_ADD(rx_xon_pause_rcvd);
10167         ESTAT_ADD(rx_xoff_pause_rcvd);
10168         ESTAT_ADD(rx_mac_ctrl_rcvd);
10169         ESTAT_ADD(rx_xoff_entered);
10170         ESTAT_ADD(rx_frame_too_long_errors);
10171         ESTAT_ADD(rx_jabbers);
10172         ESTAT_ADD(rx_undersize_packets);
10173         ESTAT_ADD(rx_in_length_errors);
10174         ESTAT_ADD(rx_out_length_errors);
10175         ESTAT_ADD(rx_64_or_less_octet_packets);
10176         ESTAT_ADD(rx_65_to_127_octet_packets);
10177         ESTAT_ADD(rx_128_to_255_octet_packets);
10178         ESTAT_ADD(rx_256_to_511_octet_packets);
10179         ESTAT_ADD(rx_512_to_1023_octet_packets);
10180         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10181         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10182         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10183         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10184         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10185
10186         ESTAT_ADD(tx_octets);
10187         ESTAT_ADD(tx_collisions);
10188         ESTAT_ADD(tx_xon_sent);
10189         ESTAT_ADD(tx_xoff_sent);
10190         ESTAT_ADD(tx_flow_control);
10191         ESTAT_ADD(tx_mac_errors);
10192         ESTAT_ADD(tx_single_collisions);
10193         ESTAT_ADD(tx_mult_collisions);
10194         ESTAT_ADD(tx_deferred);
10195         ESTAT_ADD(tx_excessive_collisions);
10196         ESTAT_ADD(tx_late_collisions);
10197         ESTAT_ADD(tx_collide_2times);
10198         ESTAT_ADD(tx_collide_3times);
10199         ESTAT_ADD(tx_collide_4times);
10200         ESTAT_ADD(tx_collide_5times);
10201         ESTAT_ADD(tx_collide_6times);
10202         ESTAT_ADD(tx_collide_7times);
10203         ESTAT_ADD(tx_collide_8times);
10204         ESTAT_ADD(tx_collide_9times);
10205         ESTAT_ADD(tx_collide_10times);
10206         ESTAT_ADD(tx_collide_11times);
10207         ESTAT_ADD(tx_collide_12times);
10208         ESTAT_ADD(tx_collide_13times);
10209         ESTAT_ADD(tx_collide_14times);
10210         ESTAT_ADD(tx_collide_15times);
10211         ESTAT_ADD(tx_ucast_packets);
10212         ESTAT_ADD(tx_mcast_packets);
10213         ESTAT_ADD(tx_bcast_packets);
10214         ESTAT_ADD(tx_carrier_sense_errors);
10215         ESTAT_ADD(tx_discards);
10216         ESTAT_ADD(tx_errors);
10217
10218         ESTAT_ADD(dma_writeq_full);
10219         ESTAT_ADD(dma_write_prioq_full);
10220         ESTAT_ADD(rxbds_empty);
10221         ESTAT_ADD(rx_discards);
10222         ESTAT_ADD(rx_errors);
10223         ESTAT_ADD(rx_threshold_hit);
10224
10225         ESTAT_ADD(dma_readq_full);
10226         ESTAT_ADD(dma_read_prioq_full);
10227         ESTAT_ADD(tx_comp_queue_full);
10228
10229         ESTAT_ADD(ring_set_send_prod_index);
10230         ESTAT_ADD(ring_status_update);
10231         ESTAT_ADD(nic_irqs);
10232         ESTAT_ADD(nic_avoided_irqs);
10233         ESTAT_ADD(nic_tx_threshold_hit);
10234
10235         ESTAT_ADD(mbuf_lwm_thresh_hit);
10236 }
10237
10238 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10239 {
10240         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10241         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10242
10243         stats->rx_packets = old_stats->rx_packets +
10244                 get_stat64(&hw_stats->rx_ucast_packets) +
10245                 get_stat64(&hw_stats->rx_mcast_packets) +
10246                 get_stat64(&hw_stats->rx_bcast_packets);
10247
10248         stats->tx_packets = old_stats->tx_packets +
10249                 get_stat64(&hw_stats->tx_ucast_packets) +
10250                 get_stat64(&hw_stats->tx_mcast_packets) +
10251                 get_stat64(&hw_stats->tx_bcast_packets);
10252
10253         stats->rx_bytes = old_stats->rx_bytes +
10254                 get_stat64(&hw_stats->rx_octets);
10255         stats->tx_bytes = old_stats->tx_bytes +
10256                 get_stat64(&hw_stats->tx_octets);
10257
10258         stats->rx_errors = old_stats->rx_errors +
10259                 get_stat64(&hw_stats->rx_errors);
10260         stats->tx_errors = old_stats->tx_errors +
10261                 get_stat64(&hw_stats->tx_errors) +
10262                 get_stat64(&hw_stats->tx_mac_errors) +
10263                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10264                 get_stat64(&hw_stats->tx_discards);
10265
10266         stats->multicast = old_stats->multicast +
10267                 get_stat64(&hw_stats->rx_mcast_packets);
10268         stats->collisions = old_stats->collisions +
10269                 get_stat64(&hw_stats->tx_collisions);
10270
10271         stats->rx_length_errors = old_stats->rx_length_errors +
10272                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10273                 get_stat64(&hw_stats->rx_undersize_packets);
10274
10275         stats->rx_over_errors = old_stats->rx_over_errors +
10276                 get_stat64(&hw_stats->rxbds_empty);
10277         stats->rx_frame_errors = old_stats->rx_frame_errors +
10278                 get_stat64(&hw_stats->rx_align_errors);
10279         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10280                 get_stat64(&hw_stats->tx_discards);
10281         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10282                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10283
10284         stats->rx_crc_errors = old_stats->rx_crc_errors +
10285                 tg3_calc_crc_errors(tp);
10286
10287         stats->rx_missed_errors = old_stats->rx_missed_errors +
10288                 get_stat64(&hw_stats->rx_discards);
10289
10290         stats->rx_dropped = tp->rx_dropped;
10291         stats->tx_dropped = tp->tx_dropped;
10292 }
10293
10294 static int tg3_get_regs_len(struct net_device *dev)
10295 {
10296         return TG3_REG_BLK_SIZE;
10297 }
10298
10299 static void tg3_get_regs(struct net_device *dev,
10300                 struct ethtool_regs *regs, void *_p)
10301 {
10302         struct tg3 *tp = netdev_priv(dev);
10303
10304         regs->version = 0;
10305
10306         memset(_p, 0, TG3_REG_BLK_SIZE);
10307
10308         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10309                 return;
10310
10311         tg3_full_lock(tp, 0);
10312
10313         tg3_dump_legacy_regs(tp, (u32 *)_p);
10314
10315         tg3_full_unlock(tp);
10316 }
10317
10318 static int tg3_get_eeprom_len(struct net_device *dev)
10319 {
10320         struct tg3 *tp = netdev_priv(dev);
10321
10322         return tp->nvram_size;
10323 }
10324
10325 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10326 {
10327         struct tg3 *tp = netdev_priv(dev);
10328         int ret;
10329         u8  *pd;
10330         u32 i, offset, len, b_offset, b_count;
10331         __be32 val;
10332
10333         if (tg3_flag(tp, NO_NVRAM))
10334                 return -EINVAL;
10335
10336         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10337                 return -EAGAIN;
10338
10339         offset = eeprom->offset;
10340         len = eeprom->len;
10341         eeprom->len = 0;
10342
10343         eeprom->magic = TG3_EEPROM_MAGIC;
10344
10345         if (offset & 3) {
10346                 /* adjustments to start on required 4 byte boundary */
10347                 b_offset = offset & 3;
10348                 b_count = 4 - b_offset;
10349                 if (b_count > len) {
10350                         /* i.e. offset=1 len=2 */
10351                         b_count = len;
10352                 }
10353                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10354                 if (ret)
10355                         return ret;
10356                 memcpy(data, ((char *)&val) + b_offset, b_count);
10357                 len -= b_count;
10358                 offset += b_count;
10359                 eeprom->len += b_count;
10360         }
10361
10362         /* read bytes up to the last 4 byte boundary */
10363         pd = &data[eeprom->len];
10364         for (i = 0; i < (len - (len & 3)); i += 4) {
10365                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10366                 if (ret) {
10367                         eeprom->len += i;
10368                         return ret;
10369                 }
10370                 memcpy(pd + i, &val, 4);
10371         }
10372         eeprom->len += i;
10373
10374         if (len & 3) {
10375                 /* read last bytes not ending on 4 byte boundary */
10376                 pd = &data[eeprom->len];
10377                 b_count = len & 3;
10378                 b_offset = offset + len - b_count;
10379                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10380                 if (ret)
10381                         return ret;
10382                 memcpy(pd, &val, b_count);
10383                 eeprom->len += b_count;
10384         }
10385         return 0;
10386 }
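
/* Worked example of the alignment handling in tg3_get_eeprom() (a
 * sketch): a request with offset = 1, len = 10 becomes three NVRAM
 * word reads: the word at 0 (bytes 1-3 copied, b_count = 3), the word
 * at 4 (four aligned bytes), and the word at 8 (the len & 3 = 3 byte
 * tail, bytes 8-10).
 */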
10387
10388 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10389 {
10390         struct tg3 *tp = netdev_priv(dev);
10391         int ret;
10392         u32 offset, len, b_offset, odd_len;
10393         u8 *buf;
10394         __be32 start, end;
10395
10396         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10397                 return -EAGAIN;
10398
10399         if (tg3_flag(tp, NO_NVRAM) ||
10400             eeprom->magic != TG3_EEPROM_MAGIC)
10401                 return -EINVAL;
10402
10403         offset = eeprom->offset;
10404         len = eeprom->len;
10405
10406         if ((b_offset = (offset & 3))) {
10407                 /* adjustments to start on required 4 byte boundary */
10408                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10409                 if (ret)
10410                         return ret;
10411                 len += b_offset;
10412                 offset &= ~3;
10413                 if (len < 4)
10414                         len = 4;
10415         }
10416
10417         odd_len = 0;
10418         if (len & 3) {
10419                 /* adjustments to end on required 4 byte boundary */
10420                 odd_len = 1;
10421                 len = (len + 3) & ~3;
10422                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10423                 if (ret)
10424                         return ret;
10425         }
10426
10427         buf = data;
10428         if (b_offset || odd_len) {
10429                 buf = kmalloc(len, GFP_KERNEL);
10430                 if (!buf)
10431                         return -ENOMEM;
10432                 if (b_offset)
10433                         memcpy(buf, &start, 4);
10434                 if (odd_len)
10435                         memcpy(buf+len-4, &end, 4);
10436                 memcpy(buf + b_offset, data, eeprom->len);
10437         }
10438
10439         ret = tg3_nvram_write_block(tp, offset, len, buf);
10440
10441         if (buf != data)
10442                 kfree(buf);
10443
10444         return ret;
10445 }
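
/* Worked example (a sketch): a write with offset = 2, len = 4 reads
 * the word at 0 into 'start' (b_offset = 2), widening the range to
 * offset = 0, len = 6; len is then rounded up to 8 and the word at 4
 * is read into 'end' (odd_len = 1).  The bounce buffer is assembled
 * as start bytes, caller data, then end bytes, so the untouched bytes
 * of both boundary words are preserved across the write.
 */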
10446
10447 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10448 {
10449         struct tg3 *tp = netdev_priv(dev);
10450
10451         if (tg3_flag(tp, USE_PHYLIB)) {
10452                 struct phy_device *phydev;
10453                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10454                         return -EAGAIN;
10455                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10456                 return phy_ethtool_gset(phydev, cmd);
10457         }
10458
10459         cmd->supported = (SUPPORTED_Autoneg);
10460
10461         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10462                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10463                                    SUPPORTED_1000baseT_Full);
10464
10465         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10466                 cmd->supported |= (SUPPORTED_100baseT_Half |
10467                                   SUPPORTED_100baseT_Full |
10468                                   SUPPORTED_10baseT_Half |
10469                                   SUPPORTED_10baseT_Full |
10470                                   SUPPORTED_TP);
10471                 cmd->port = PORT_TP;
10472         } else {
10473                 cmd->supported |= SUPPORTED_FIBRE;
10474                 cmd->port = PORT_FIBRE;
10475         }
10476
10477         cmd->advertising = tp->link_config.advertising;
10478         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10479                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10480                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10481                                 cmd->advertising |= ADVERTISED_Pause;
10482                         } else {
10483                                 cmd->advertising |= ADVERTISED_Pause |
10484                                                     ADVERTISED_Asym_Pause;
10485                         }
10486                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10487                         cmd->advertising |= ADVERTISED_Asym_Pause;
10488                 }
10489         }
10490         if (netif_running(dev) && netif_carrier_ok(dev)) {
10491                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10492                 cmd->duplex = tp->link_config.active_duplex;
10493                 cmd->lp_advertising = tp->link_config.rmt_adv;
10494                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10495                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10496                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10497                         else
10498                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10499                 }
10500         } else {
10501                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10502                 cmd->duplex = DUPLEX_UNKNOWN;
10503                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10504         }
10505         cmd->phy_address = tp->phy_addr;
10506         cmd->transceiver = XCVR_INTERNAL;
10507         cmd->autoneg = tp->link_config.autoneg;
10508         cmd->maxtxpkt = 0;
10509         cmd->maxrxpkt = 0;
10510         return 0;
10511 }
10512
10513 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10514 {
10515         struct tg3 *tp = netdev_priv(dev);
10516         u32 speed = ethtool_cmd_speed(cmd);
10517
10518         if (tg3_flag(tp, USE_PHYLIB)) {
10519                 struct phy_device *phydev;
10520                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10521                         return -EAGAIN;
10522                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10523                 return phy_ethtool_sset(phydev, cmd);
10524         }
10525
10526         if (cmd->autoneg != AUTONEG_ENABLE &&
10527             cmd->autoneg != AUTONEG_DISABLE)
10528                 return -EINVAL;
10529
10530         if (cmd->autoneg == AUTONEG_DISABLE &&
10531             cmd->duplex != DUPLEX_FULL &&
10532             cmd->duplex != DUPLEX_HALF)
10533                 return -EINVAL;
10534
10535         if (cmd->autoneg == AUTONEG_ENABLE) {
10536                 u32 mask = ADVERTISED_Autoneg |
10537                            ADVERTISED_Pause |
10538                            ADVERTISED_Asym_Pause;
10539
10540                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10541                         mask |= ADVERTISED_1000baseT_Half |
10542                                 ADVERTISED_1000baseT_Full;
10543
10544                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10545                         mask |= ADVERTISED_100baseT_Half |
10546                                 ADVERTISED_100baseT_Full |
10547                                 ADVERTISED_10baseT_Half |
10548                                 ADVERTISED_10baseT_Full |
10549                                 ADVERTISED_TP;
10550                 else
10551                         mask |= ADVERTISED_FIBRE;
10552
10553                 if (cmd->advertising & ~mask)
10554                         return -EINVAL;
10555
10556                 mask &= (ADVERTISED_1000baseT_Half |
10557                          ADVERTISED_1000baseT_Full |
10558                          ADVERTISED_100baseT_Half |
10559                          ADVERTISED_100baseT_Full |
10560                          ADVERTISED_10baseT_Half |
10561                          ADVERTISED_10baseT_Full);
10562
10563                 cmd->advertising &= mask;
10564         } else {
10565                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10566                         if (speed != SPEED_1000)
10567                                 return -EINVAL;
10568
10569                         if (cmd->duplex != DUPLEX_FULL)
10570                                 return -EINVAL;
10571                 } else {
10572                         if (speed != SPEED_100 &&
10573                             speed != SPEED_10)
10574                                 return -EINVAL;
10575                 }
10576         }
10577
10578         tg3_full_lock(tp, 0);
10579
10580         tp->link_config.autoneg = cmd->autoneg;
10581         if (cmd->autoneg == AUTONEG_ENABLE) {
10582                 tp->link_config.advertising = (cmd->advertising |
10583                                               ADVERTISED_Autoneg);
10584                 tp->link_config.speed = SPEED_UNKNOWN;
10585                 tp->link_config.duplex = DUPLEX_UNKNOWN;
10586         } else {
10587                 tp->link_config.advertising = 0;
10588                 tp->link_config.speed = speed;
10589                 tp->link_config.duplex = cmd->duplex;
10590         }
10591
10592         if (netif_running(dev))
10593                 tg3_setup_phy(tp, 1);
10594
10595         tg3_full_unlock(tp);
10596
10597         return 0;
10598 }
10599
10600 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10601 {
10602         struct tg3 *tp = netdev_priv(dev);
10603
10604         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10605         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10606         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10607         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10608 }
10609
10610 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10611 {
10612         struct tg3 *tp = netdev_priv(dev);
10613
10614         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10615                 wol->supported = WAKE_MAGIC;
10616         else
10617                 wol->supported = 0;
10618         wol->wolopts = 0;
10619         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10620                 wol->wolopts = WAKE_MAGIC;
10621         memset(&wol->sopass, 0, sizeof(wol->sopass));
10622 }
10623
10624 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10625 {
10626         struct tg3 *tp = netdev_priv(dev);
10627         struct device *dp = &tp->pdev->dev;
10628
10629         if (wol->wolopts & ~WAKE_MAGIC)
10630                 return -EINVAL;
10631         if ((wol->wolopts & WAKE_MAGIC) &&
10632             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10633                 return -EINVAL;
10634
10635         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10636
10637         spin_lock_bh(&tp->lock);
10638         if (device_may_wakeup(dp))
10639                 tg3_flag_set(tp, WOL_ENABLE);
10640         else
10641                 tg3_flag_clear(tp, WOL_ENABLE);
10642         spin_unlock_bh(&tp->lock);
10643
10644         return 0;
10645 }
10646
10647 static u32 tg3_get_msglevel(struct net_device *dev)
10648 {
10649         struct tg3 *tp = netdev_priv(dev);
10650         return tp->msg_enable;
10651 }
10652
10653 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10654 {
10655         struct tg3 *tp = netdev_priv(dev);
10656         tp->msg_enable = value;
10657 }
10658
10659 static int tg3_nway_reset(struct net_device *dev)
10660 {
10661         struct tg3 *tp = netdev_priv(dev);
10662         int r;
10663
10664         if (!netif_running(dev))
10665                 return -EAGAIN;
10666
10667         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10668                 return -EINVAL;
10669
10670         if (tg3_flag(tp, USE_PHYLIB)) {
10671                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10672                         return -EAGAIN;
10673                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10674         } else {
10675                 u32 bmcr;
10676
10677                 spin_lock_bh(&tp->lock);
10678                 r = -EINVAL;
10679                 tg3_readphy(tp, MII_BMCR, &bmcr);
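                /* (BMCR is deliberately read twice; the first read is
                 * presumably a dummy read to flush a stale value, an
                 * assumption, since the original code leaves it
                 * uncommented.)
                 */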
10680                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10681                     ((bmcr & BMCR_ANENABLE) ||
10682                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10683                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10684                                                    BMCR_ANENABLE);
10685                         r = 0;
10686                 }
10687                 spin_unlock_bh(&tp->lock);
10688         }
10689
10690         return r;
10691 }
10692
10693 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10694 {
10695         struct tg3 *tp = netdev_priv(dev);
10696
10697         ering->rx_max_pending = tp->rx_std_ring_mask;
10698         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10699                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10700         else
10701                 ering->rx_jumbo_max_pending = 0;
10702
10703         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10704
10705         ering->rx_pending = tp->rx_pending;
10706         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10707                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10708         else
10709                 ering->rx_jumbo_pending = 0;
10710
10711         ering->tx_pending = tp->napi[0].tx_pending;
10712 }
10713
10714 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10715 {
10716         struct tg3 *tp = netdev_priv(dev);
10717         int i, irq_sync = 0, err = 0;
10718
10719         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10720             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10721             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10722             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10723             (tg3_flag(tp, TSO_BUG) &&
10724              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10725                 return -EINVAL;
10726
10727         if (netif_running(dev)) {
10728                 tg3_phy_stop(tp);
10729                 tg3_netif_stop(tp);
10730                 irq_sync = 1;
10731         }
10732
10733         tg3_full_lock(tp, irq_sync);
10734
10735         tp->rx_pending = ering->rx_pending;
10736
10737         if (tg3_flag(tp, MAX_RXPEND_64) &&
10738             tp->rx_pending > 63)
10739                 tp->rx_pending = 63;
10740         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10741
10742         for (i = 0; i < tp->irq_max; i++)
10743                 tp->napi[i].tx_pending = ering->tx_pending;
10744
10745         if (netif_running(dev)) {
10746                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10747                 err = tg3_restart_hw(tp, 1);
10748                 if (!err)
10749                         tg3_netif_start(tp);
10750         }
10751
10752         tg3_full_unlock(tp);
10753
10754         if (irq_sync && !err)
10755                 tg3_phy_start(tp);
10756
10757         return err;
10758 }
10759
10760 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10761 {
10762         struct tg3 *tp = netdev_priv(dev);
10763
10764         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10765
10766         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10767                 epause->rx_pause = 1;
10768         else
10769                 epause->rx_pause = 0;
10770
10771         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10772                 epause->tx_pause = 1;
10773         else
10774                 epause->tx_pause = 0;
10775 }
10776
10777 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10778 {
10779         struct tg3 *tp = netdev_priv(dev);
10780         int err = 0;
10781
10782         if (tg3_flag(tp, USE_PHYLIB)) {
10783                 u32 newadv;
10784                 struct phy_device *phydev;
10785
10786                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10787
10788                 if (!(phydev->supported & SUPPORTED_Pause) ||
10789                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10790                      (epause->rx_pause != epause->tx_pause)))
10791                         return -EINVAL;
10792
10793                 tp->link_config.flowctrl = 0;
10794                 if (epause->rx_pause) {
10795                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10796
10797                         if (epause->tx_pause) {
10798                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10799                                 newadv = ADVERTISED_Pause;
10800                         } else
10801                                 newadv = ADVERTISED_Pause |
10802                                          ADVERTISED_Asym_Pause;
10803                 } else if (epause->tx_pause) {
10804                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10805                         newadv = ADVERTISED_Asym_Pause;
10806                 } else
10807                         newadv = 0;
10808
10809                 if (epause->autoneg)
10810                         tg3_flag_set(tp, PAUSE_AUTONEG);
10811                 else
10812                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10813
10814                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10815                         u32 oldadv = phydev->advertising &
10816                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10817                         if (oldadv != newadv) {
10818                                 phydev->advertising &=
10819                                         ~(ADVERTISED_Pause |
10820                                           ADVERTISED_Asym_Pause);
10821                                 phydev->advertising |= newadv;
10822                                 if (phydev->autoneg) {
10823                                         /*
10824                                          * Always renegotiate the link to
10825                                          * inform our link partner of our
10826                                          * flow control settings, even if the
10827                                          * flow control is forced.  Let
10828                                          * tg3_adjust_link() do the final
10829                                          * flow control setup.
10830                                          */
10831                                         return phy_start_aneg(phydev);
10832                                 }
10833                         }
10834
10835                         if (!epause->autoneg)
10836                                 tg3_setup_flow_control(tp, 0, 0);
10837                 } else {
10838                         tp->link_config.advertising &=
10839                                         ~(ADVERTISED_Pause |
10840                                           ADVERTISED_Asym_Pause);
10841                         tp->link_config.advertising |= newadv;
10842                 }
10843         } else {
10844                 int irq_sync = 0;
10845
10846                 if (netif_running(dev)) {
10847                         tg3_netif_stop(tp);
10848                         irq_sync = 1;
10849                 }
10850
10851                 tg3_full_lock(tp, irq_sync);
10852
10853                 if (epause->autoneg)
10854                         tg3_flag_set(tp, PAUSE_AUTONEG);
10855                 else
10856                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10857                 if (epause->rx_pause)
10858                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10859                 else
10860                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10861                 if (epause->tx_pause)
10862                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10863                 else
10864                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10865
10866                 if (netif_running(dev)) {
10867                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10868                         err = tg3_restart_hw(tp, 1);
10869                         if (!err)
10870                                 tg3_netif_start(tp);
10871                 }
10872
10873                 tg3_full_unlock(tp);
10874         }
10875
10876         return err;
10877 }
10878
10879 static int tg3_get_sset_count(struct net_device *dev, int sset)
10880 {
10881         switch (sset) {
10882         case ETH_SS_TEST:
10883                 return TG3_NUM_TEST;
10884         case ETH_SS_STATS:
10885                 return TG3_NUM_STATS;
10886         default:
10887                 return -EOPNOTSUPP;
10888         }
10889 }
10890
10891 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10892                          u32 *rules __always_unused)
10893 {
10894         struct tg3 *tp = netdev_priv(dev);
10895
10896         if (!tg3_flag(tp, SUPPORT_MSIX))
10897                 return -EOPNOTSUPP;
10898
10899         switch (info->cmd) {
10900         case ETHTOOL_GRXRINGS:
10901                 if (netif_running(tp->dev))
10902                         info->data = tp->irq_cnt;
10903                 else {
10904                         info->data = num_online_cpus();
10905                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
10906                                 info->data = TG3_IRQ_MAX_VECS_RSS;
10907                 }
10908
10909                 /* The first interrupt vector only
10910                  * handles link interrupts.
10911                  */
10912                 info->data -= 1;
10913                 return 0;
10914
10915         default:
10916                 return -EOPNOTSUPP;
10917         }
10918 }
10919
10920 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10921 {
10922         u32 size = 0;
10923         struct tg3 *tp = netdev_priv(dev);
10924
10925         if (tg3_flag(tp, SUPPORT_MSIX))
10926                 size = TG3_RSS_INDIR_TBL_SIZE;
10927
10928         return size;
10929 }
10930
10931 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10932 {
10933         struct tg3 *tp = netdev_priv(dev);
10934         int i;
10935
10936         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10937                 indir[i] = tp->rss_ind_tbl[i];
10938
10939         return 0;
10940 }
10941
10942 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10943 {
10944         struct tg3 *tp = netdev_priv(dev);
10945         size_t i;
10946
10947         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10948                 tp->rss_ind_tbl[i] = indir[i];
10949
10950         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10951                 return 0;
10952
10953         /* It is legal to write the indirection
10954          * table while the device is running.
10955          */
10956         tg3_full_lock(tp, 0);
10957         tg3_rss_write_indir_tbl(tp);
10958         tg3_full_unlock(tp);
10959
10960         return 0;
10961 }
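
/* A minimal usage sketch (tg3_example_fill_indir is a hypothetical
 * helper, not part of the driver): spread flows evenly across nrxq rx
 * queues before handing the table to the set_rxfh_indir hook above.
 */
static void tg3_example_fill_indir(u32 *indir, unsigned int nrxq)
{
        int i;

        for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
                indir[i] = i % nrxq;    /* round-robin over rx queues */
}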
10962
10963 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10964 {
10965         switch (stringset) {
10966         case ETH_SS_STATS:
10967                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10968                 break;
10969         case ETH_SS_TEST:
10970                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10971                 break;
10972         default:
10973                 WARN_ON(1);     /* we need a WARN() */
10974                 break;
10975         }
10976 }
10977
10978 static int tg3_set_phys_id(struct net_device *dev,
10979                             enum ethtool_phys_id_state state)
10980 {
10981         struct tg3 *tp = netdev_priv(dev);
10982
10983         if (!netif_running(tp->dev))
10984                 return -EAGAIN;
10985
10986         switch (state) {
10987         case ETHTOOL_ID_ACTIVE:
10988                 return 1;       /* cycle on/off once per second */
10989
10990         case ETHTOOL_ID_ON:
10991                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10992                      LED_CTRL_1000MBPS_ON |
10993                      LED_CTRL_100MBPS_ON |
10994                      LED_CTRL_10MBPS_ON |
10995                      LED_CTRL_TRAFFIC_OVERRIDE |
10996                      LED_CTRL_TRAFFIC_BLINK |
10997                      LED_CTRL_TRAFFIC_LED);
10998                 break;
10999
11000         case ETHTOOL_ID_OFF:
11001                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11002                      LED_CTRL_TRAFFIC_OVERRIDE);
11003                 break;
11004
11005         case ETHTOOL_ID_INACTIVE:
11006                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11007                 break;
11008         }
11009
11010         return 0;
11011 }
11012
11013 static void tg3_get_ethtool_stats(struct net_device *dev,
11014                                    struct ethtool_stats *estats, u64 *tmp_stats)
11015 {
11016         struct tg3 *tp = netdev_priv(dev);
11017
11018         if (tp->hw_stats)
11019                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11020         else
11021                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11022 }
11023
11024 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11025 {
11026         int i;
11027         __be32 *buf;
11028         u32 offset = 0, len = 0;
11029         u32 magic, val;
11030
11031         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11032                 return NULL;
11033
11034         if (magic == TG3_EEPROM_MAGIC) {
11035                 for (offset = TG3_NVM_DIR_START;
11036                      offset < TG3_NVM_DIR_END;
11037                      offset += TG3_NVM_DIRENT_SIZE) {
11038                         if (tg3_nvram_read(tp, offset, &val))
11039                                 return NULL;
11040
11041                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11042                             TG3_NVM_DIRTYPE_EXTVPD)
11043                                 break;
11044                 }
11045
11046                 if (offset != TG3_NVM_DIR_END) {
11047                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11048                         if (tg3_nvram_read(tp, offset + 4, &offset))
11049                                 return NULL;
11050
11051                         offset = tg3_nvram_logical_addr(tp, offset);
11052                 }
11053         }
11054
11055         if (!offset || !len) {
11056                 offset = TG3_NVM_VPD_OFF;
11057                 len = TG3_NVM_VPD_LEN;
11058         }
11059
11060         buf = kmalloc(len, GFP_KERNEL);
11061         if (buf == NULL)
11062                 return NULL;
11063
11064         if (magic == TG3_EEPROM_MAGIC) {
11065                 for (i = 0; i < len; i += 4) {
11066                         /* The data is in little-endian format in NVRAM.
11067                          * Use the big-endian read routines to preserve
11068                          * the byte order as it exists in NVRAM.
11069                          */
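                        /* (Storing the raw __be32 keeps buf[] byte-for-byte
                         * identical to the device contents, which is why
                         * the VPD parsing in tg3_test_nvram() can treat it
                         * as a plain u8 array.)
                         */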
11070                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11071                                 goto error;
11072                 }
11073         } else {
11074                 u8 *ptr;
11075                 ssize_t cnt;
11076                 unsigned int pos = 0;
11077
11078                 ptr = (u8 *)&buf[0];
11079                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11080                         cnt = pci_read_vpd(tp->pdev, pos,
11081                                            len - pos, ptr);
11082                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11083                                 cnt = 0;
11084                         else if (cnt < 0)
11085                                 goto error;
11086                 }
11087                 if (pos != len)
11088                         goto error;
11089         }
11090
11091         *vpdlen = len;
11092
11093         return buf;
11094
11095 error:
11096         kfree(buf);
11097         return NULL;
11098 }
11099
11100 #define NVRAM_TEST_SIZE 0x100
11101 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11102 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11103 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11104 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11105 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11106 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11107 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11108 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11109
11110 static int tg3_test_nvram(struct tg3 *tp)
11111 {
11112         u32 csum, magic, len;
11113         __be32 *buf;
11114         int i, j, k, err = 0, size;
11115
11116         if (tg3_flag(tp, NO_NVRAM))
11117                 return 0;
11118
11119         if (tg3_nvram_read(tp, 0, &magic) != 0)
11120                 return -EIO;
11121
11122         if (magic == TG3_EEPROM_MAGIC)
11123                 size = NVRAM_TEST_SIZE;
11124         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11125                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11126                     TG3_EEPROM_SB_FORMAT_1) {
11127                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11128                         case TG3_EEPROM_SB_REVISION_0:
11129                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11130                                 break;
11131                         case TG3_EEPROM_SB_REVISION_2:
11132                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11133                                 break;
11134                         case TG3_EEPROM_SB_REVISION_3:
11135                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11136                                 break;
11137                         case TG3_EEPROM_SB_REVISION_4:
11138                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11139                                 break;
11140                         case TG3_EEPROM_SB_REVISION_5:
11141                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11142                                 break;
11143                         case TG3_EEPROM_SB_REVISION_6:
11144                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11145                                 break;
11146                         default:
11147                                 return -EIO;
11148                         }
11149                 } else
11150                         return 0;
11151         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11152                 size = NVRAM_SELFBOOT_HW_SIZE;
11153         else
11154                 return -EIO;
11155
11156         buf = kmalloc(size, GFP_KERNEL);
11157         if (buf == NULL)
11158                 return -ENOMEM;
11159
11160         err = -EIO;
11161         for (i = 0, j = 0; i < size; i += 4, j++) {
11162                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11163                 if (err)
11164                         break;
11165         }
11166         if (i < size)
11167                 goto out;
11168
11169         /* Selfboot format */
11170         magic = be32_to_cpu(buf[0]);
11171         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11172             TG3_EEPROM_MAGIC_FW) {
11173                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11174
11175                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11176                     TG3_EEPROM_SB_REVISION_2) {
11177                         /* For rev 2, the csum doesn't include the MBA. */
11178                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11179                                 csum8 += buf8[i];
11180                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11181                                 csum8 += buf8[i];
11182                 } else {
11183                         for (i = 0; i < size; i++)
11184                                 csum8 += buf8[i];
11185                 }
11186
11187                 if (csum8 == 0) {
11188                         err = 0;
11189                         goto out;
11190                 }
11191
11192                 err = -EIO;
11193                 goto out;
11194         }
11195
11196         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11197             TG3_EEPROM_MAGIC_HW) {
11198                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11199                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11200                 u8 *buf8 = (u8 *) buf;
11201
11202                 /* Separate the parity bits and the data bytes.  */
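                /* Bytes 0 and 8 each supply seven parity bits (MSB first),
                 * byte 16 supplies six and byte 17 eight more; every other
                 * byte of the image is payload covered by those bits.
                 */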
11203                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11204                         if ((i == 0) || (i == 8)) {
11205                                 int l;
11206                                 u8 msk;
11207
11208                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11209                                         parity[k++] = buf8[i] & msk;
11210                                 i++;
11211                         } else if (i == 16) {
11212                                 int l;
11213                                 u8 msk;
11214
11215                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11216                                         parity[k++] = buf8[i] & msk;
11217                                 i++;
11218
11219                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11220                                         parity[k++] = buf8[i] & msk;
11221                                 i++;
11222                         }
11223                         data[j++] = buf8[i];
11224                 }
11225
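                /* Odd-parity check: each data byte together with its parity
                 * bit must contain an odd number of set bits, so a byte of
                 * odd weight must have a clear parity bit and a byte of even
                 * weight must have a set one.
                 */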
11226                 err = -EIO;
11227                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11228                         u8 hw8 = hweight8(data[i]);
11229
11230                         if ((hw8 & 0x1) && parity[i])
11231                                 goto out;
11232                         else if (!(hw8 & 0x1) && !parity[i])
11233                                 goto out;
11234                 }
11235                 err = 0;
11236                 goto out;
11237         }
11238
11239         err = -EIO;
11240
11241         /* Bootstrap checksum at offset 0x10 */
11242         csum = calc_crc((unsigned char *) buf, 0x10);
11243         if (csum != le32_to_cpu(buf[0x10/4]))
11244                 goto out;
11245
11246         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11247         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11248         if (csum != le32_to_cpu(buf[0xfc/4]))
11249                 goto out;
11250
11251         kfree(buf);
11252
11253         buf = tg3_vpd_readblock(tp, &len);
11254         if (!buf)
11255                 return -ENOMEM;
11256
11257         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11258         if (i > 0) {
11259                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11260                 if (j < 0)
11261                         goto out;
11262
11263                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11264                         goto out;
11265
11266                 i += PCI_VPD_LRDT_TAG_SIZE;
11267                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11268                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11269                 if (j > 0) {
11270                         u8 csum8 = 0;
11271
11272                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11273
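                        /* Per the PCI VPD convention, the bytes from the
                         * start of the VPD block up to and including the
                         * "RV" checksum byte must sum to zero (mod 256).
                         */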
11274                         for (i = 0; i <= j; i++)
11275                                 csum8 += ((u8 *)buf)[i];
11276
11277                         if (csum8)
11278                                 goto out;
11279                 }
11280         }
11281
11282         err = 0;
11283
11284 out:
11285         kfree(buf);
11286         return err;
11287 }
11288
11289 #define TG3_SERDES_TIMEOUT_SEC  2
11290 #define TG3_COPPER_TIMEOUT_SEC  6
11291
11292 static int tg3_test_link(struct tg3 *tp)
11293 {
11294         int i, max;
11295
11296         if (!netif_running(tp->dev))
11297                 return -ENODEV;
11298
11299         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11300                 max = TG3_SERDES_TIMEOUT_SEC;
11301         else
11302                 max = TG3_COPPER_TIMEOUT_SEC;
11303
11304         for (i = 0; i < max; i++) {
11305                 if (netif_carrier_ok(tp->dev))
11306                         return 0;
11307
11308                 if (msleep_interruptible(1000))
11309                         break;
11310         }
11311
11312         return -EIO;
11313 }
11314
11315 /* Only test the commonly used registers */
11316 static int tg3_test_registers(struct tg3 *tp)
11317 {
11318         int i, is_5705, is_5750;
11319         u32 offset, read_mask, write_mask, val, save_val, read_val;
11320         static struct {
11321                 u16 offset;
11322                 u16 flags;
11323 #define TG3_FL_5705     0x1
11324 #define TG3_FL_NOT_5705 0x2
11325 #define TG3_FL_NOT_5788 0x4
11326 #define TG3_FL_NOT_5750 0x8
11327                 u32 read_mask;
11328                 u32 write_mask;
11329         } reg_tbl[] = {
11330                 /* MAC Control Registers */
11331                 { MAC_MODE, TG3_FL_NOT_5705,
11332                         0x00000000, 0x00ef6f8c },
11333                 { MAC_MODE, TG3_FL_5705,
11334                         0x00000000, 0x01ef6b8c },
11335                 { MAC_STATUS, TG3_FL_NOT_5705,
11336                         0x03800107, 0x00000000 },
11337                 { MAC_STATUS, TG3_FL_5705,
11338                         0x03800100, 0x00000000 },
11339                 { MAC_ADDR_0_HIGH, 0x0000,
11340                         0x00000000, 0x0000ffff },
11341                 { MAC_ADDR_0_LOW, 0x0000,
11342                         0x00000000, 0xffffffff },
11343                 { MAC_RX_MTU_SIZE, 0x0000,
11344                         0x00000000, 0x0000ffff },
11345                 { MAC_TX_MODE, 0x0000,
11346                         0x00000000, 0x00000070 },
11347                 { MAC_TX_LENGTHS, 0x0000,
11348                         0x00000000, 0x00003fff },
11349                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11350                         0x00000000, 0x000007fc },
11351                 { MAC_RX_MODE, TG3_FL_5705,
11352                         0x00000000, 0x000007dc },
11353                 { MAC_HASH_REG_0, 0x0000,
11354                         0x00000000, 0xffffffff },
11355                 { MAC_HASH_REG_1, 0x0000,
11356                         0x00000000, 0xffffffff },
11357                 { MAC_HASH_REG_2, 0x0000,
11358                         0x00000000, 0xffffffff },
11359                 { MAC_HASH_REG_3, 0x0000,
11360                         0x00000000, 0xffffffff },
11361
11362                 /* Receive Data and Receive BD Initiator Control Registers. */
11363                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11364                         0x00000000, 0xffffffff },
11365                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11366                         0x00000000, 0xffffffff },
11367                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11368                         0x00000000, 0x00000003 },
11369                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11370                         0x00000000, 0xffffffff },
11371                 { RCVDBDI_STD_BD+0, 0x0000,
11372                         0x00000000, 0xffffffff },
11373                 { RCVDBDI_STD_BD+4, 0x0000,
11374                         0x00000000, 0xffffffff },
11375                 { RCVDBDI_STD_BD+8, 0x0000,
11376                         0x00000000, 0xffff0002 },
11377                 { RCVDBDI_STD_BD+0xc, 0x0000,
11378                         0x00000000, 0xffffffff },
11379
11380                 /* Receive BD Initiator Control Registers. */
11381                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11382                         0x00000000, 0xffffffff },
11383                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11384                         0x00000000, 0x000003ff },
11385                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11386                         0x00000000, 0xffffffff },
11387
11388                 /* Host Coalescing Control Registers. */
11389                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11390                         0x00000000, 0x00000004 },
11391                 { HOSTCC_MODE, TG3_FL_5705,
11392                         0x00000000, 0x000000f6 },
11393                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11394                         0x00000000, 0xffffffff },
11395                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11396                         0x00000000, 0x000003ff },
11397                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11398                         0x00000000, 0xffffffff },
11399                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11400                         0x00000000, 0x000003ff },
11401                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11402                         0x00000000, 0xffffffff },
11403                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11404                         0x00000000, 0x000000ff },
11405                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11406                         0x00000000, 0xffffffff },
11407                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11408                         0x00000000, 0x000000ff },
11409                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11410                         0x00000000, 0xffffffff },
11411                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11412                         0x00000000, 0xffffffff },
11413                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11414                         0x00000000, 0xffffffff },
11415                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11416                         0x00000000, 0x000000ff },
11417                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11418                         0x00000000, 0xffffffff },
11419                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11420                         0x00000000, 0x000000ff },
11421                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11422                         0x00000000, 0xffffffff },
11423                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11424                         0x00000000, 0xffffffff },
11425                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11426                         0x00000000, 0xffffffff },
11427                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11428                         0x00000000, 0xffffffff },
11429                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11430                         0x00000000, 0xffffffff },
11431                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11432                         0xffffffff, 0x00000000 },
11433                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11434                         0xffffffff, 0x00000000 },
11435
11436                 /* Buffer Manager Control Registers. */
11437                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11438                         0x00000000, 0x007fff80 },
11439                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11440                         0x00000000, 0x007fffff },
11441                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11442                         0x00000000, 0x0000003f },
11443                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11444                         0x00000000, 0x000001ff },
11445                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11446                         0x00000000, 0x000001ff },
11447                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11448                         0xffffffff, 0x00000000 },
11449                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11450                         0xffffffff, 0x00000000 },
11451
11452                 /* Mailbox Registers */
11453                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11454                         0x00000000, 0x000001ff },
11455                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11456                         0x00000000, 0x000001ff },
11457                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11458                         0x00000000, 0x000007ff },
11459                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11460                         0x00000000, 0x000001ff },
11461
11462                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11463         };
11464
11465         is_5705 = is_5750 = 0;
11466         if (tg3_flag(tp, 5705_PLUS)) {
11467                 is_5705 = 1;
11468                 if (tg3_flag(tp, 5750_PLUS))
11469                         is_5750 = 1;
11470         }
11471
11472         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11473                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11474                         continue;
11475
11476                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11477                         continue;
11478
11479                 if (tg3_flag(tp, IS_5788) &&
11480                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11481                         continue;
11482
11483                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11484                         continue;
11485
11486                 offset = (u32) reg_tbl[i].offset;
11487                 read_mask = reg_tbl[i].read_mask;
11488                 write_mask = reg_tbl[i].write_mask;
11489
11490                 /* Save the original register content */
11491                 save_val = tr32(offset);
11492
11493                 /* Determine the read-only value. */
11494                 read_val = save_val & read_mask;
11495
11496                 /* Write zero to the register, then make sure the read-only bits
11497                  * are not changed and the read/write bits are all zeros.
11498                  */
11499                 tw32(offset, 0);
11500
11501                 val = tr32(offset);
11502
11503                 /* Test the read-only and read/write bits. */
11504                 if (((val & read_mask) != read_val) || (val & write_mask))
11505                         goto out;
11506
11507                 /* Write ones to all the bits defined by read_mask and
11508                  * write_mask, then make sure the read-only bits are not
11509                  * changed and the read/write bits are all ones.
11510                  */
11511                 tw32(offset, read_mask | write_mask);
11512
11513                 val = tr32(offset);
11514
11515                 /* Test the read-only bits. */
11516                 if ((val & read_mask) != read_val)
11517                         goto out;
11518
11519                 /* Test the read/write bits. */
11520                 if ((val & write_mask) != write_mask)
11521                         goto out;
11522
11523                 tw32(offset, save_val);
11524         }
11525
11526         return 0;
11527
11528 out:
11529         if (netif_msg_hw(tp))
11530                 netdev_err(tp->dev,
11531                            "Register test failed at offset %x\n", offset);
11532         tw32(offset, save_val);
11533         return -EIO;
11534 }
11535
11536 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11537 {
11538         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11539         int i;
11540         u32 j;
11541
11542         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11543                 for (j = 0; j < len; j += 4) {
11544                         u32 val;
11545
11546                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11547                         tg3_read_mem(tp, offset + j, &val);
11548                         if (val != test_pattern[i])
11549                                 return -EIO;
11550                 }
11551         }
11552         return 0;
11553 }
11554
11555 static int tg3_test_memory(struct tg3 *tp)
11556 {
11557         static struct mem_entry {
11558                 u32 offset;
11559                 u32 len;
11560         } mem_tbl_570x[] = {
11561                 { 0x00000000, 0x00b50},
11562                 { 0x00002000, 0x1c000},
11563                 { 0xffffffff, 0x00000}
11564         }, mem_tbl_5705[] = {
11565                 { 0x00000100, 0x0000c},
11566                 { 0x00000200, 0x00008},
11567                 { 0x00004000, 0x00800},
11568                 { 0x00006000, 0x01000},
11569                 { 0x00008000, 0x02000},
11570                 { 0x00010000, 0x0e000},
11571                 { 0xffffffff, 0x00000}
11572         }, mem_tbl_5755[] = {
11573                 { 0x00000200, 0x00008},
11574                 { 0x00004000, 0x00800},
11575                 { 0x00006000, 0x00800},
11576                 { 0x00008000, 0x02000},
11577                 { 0x00010000, 0x0c000},
11578                 { 0xffffffff, 0x00000}
11579         }, mem_tbl_5906[] = {
11580                 { 0x00000200, 0x00008},
11581                 { 0x00004000, 0x00400},
11582                 { 0x00006000, 0x00400},
11583                 { 0x00008000, 0x01000},
11584                 { 0x00010000, 0x01000},
11585                 { 0xffffffff, 0x00000}
11586         }, mem_tbl_5717[] = {
11587                 { 0x00000200, 0x00008},
11588                 { 0x00010000, 0x0a000},
11589                 { 0x00020000, 0x13c00},
11590                 { 0xffffffff, 0x00000}
11591         }, mem_tbl_57765[] = {
11592                 { 0x00000200, 0x00008},
11593                 { 0x00004000, 0x00800},
11594                 { 0x00006000, 0x09800},
11595                 { 0x00010000, 0x0a000},
11596                 { 0xffffffff, 0x00000}
11597         };
11598         struct mem_entry *mem_tbl;
11599         int err = 0;
11600         int i;
11601
11602         if (tg3_flag(tp, 5717_PLUS))
11603                 mem_tbl = mem_tbl_5717;
11604         else if (tg3_flag(tp, 57765_CLASS))
11605                 mem_tbl = mem_tbl_57765;
11606         else if (tg3_flag(tp, 5755_PLUS))
11607                 mem_tbl = mem_tbl_5755;
11608         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11609                 mem_tbl = mem_tbl_5906;
11610         else if (tg3_flag(tp, 5705_PLUS))
11611                 mem_tbl = mem_tbl_5705;
11612         else
11613                 mem_tbl = mem_tbl_570x;
11614
11615         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11616                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11617                 if (err)
11618                         break;
11619         }
11620
11621         return err;
11622 }
11623
11624 #define TG3_TSO_MSS             500
11625
11626 #define TG3_TSO_IP_HDR_LEN      20
11627 #define TG3_TSO_TCP_HDR_LEN     20
11628 #define TG3_TSO_TCP_OPT_LEN     12
11629
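/* A canned Ethertype + IPv4 + TCP header used to build the TSO loopback
 * frame: 0x0800 (IPv4), a 20-byte IP header (DF set, TTL 64, protocol TCP,
 * 10.0.0.1 -> 10.0.0.2, tot_len filled in later), and a 32-byte TCP header
 * whose last 12 bytes are NOP, NOP and a timestamp option.
 */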
11630 static const u8 tg3_tso_header[] = {
11631 0x08, 0x00,
11632 0x45, 0x00, 0x00, 0x00,
11633 0x00, 0x00, 0x40, 0x00,
11634 0x40, 0x06, 0x00, 0x00,
11635 0x0a, 0x00, 0x00, 0x01,
11636 0x0a, 0x00, 0x00, 0x02,
11637 0x0d, 0x00, 0xe0, 0x00,
11638 0x00, 0x00, 0x01, 0x00,
11639 0x00, 0x00, 0x02, 0x00,
11640 0x80, 0x10, 0x10, 0x00,
11641 0x14, 0x09, 0x00, 0x00,
11642 0x01, 0x01, 0x08, 0x0a,
11643 0x11, 0x11, 0x11, 0x11,
11644 0x11, 0x11, 0x11, 0x11,
11645 };
11646
11647 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11648 {
11649         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11650         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11651         u32 budget;
11652         struct sk_buff *skb;
11653         u8 *tx_data, *rx_data;
11654         dma_addr_t map;
11655         int num_pkts, tx_len, rx_len, i, err;
11656         struct tg3_rx_buffer_desc *desc;
11657         struct tg3_napi *tnapi, *rnapi;
11658         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11659
11660         tnapi = &tp->napi[0];
11661         rnapi = &tp->napi[0];
11662         if (tp->irq_cnt > 1) {
11663                 if (tg3_flag(tp, ENABLE_RSS))
11664                         rnapi = &tp->napi[1];
11665                 if (tg3_flag(tp, ENABLE_TSS))
11666                         tnapi = &tp->napi[1];
11667         }
11668         coal_now = tnapi->coal_now | rnapi->coal_now;
11669
11670         err = -EIO;
11671
11672         tx_len = pktsz;
11673         skb = netdev_alloc_skb(tp->dev, tx_len);
11674         if (!skb)
11675                 return -ENOMEM;
11676
11677         tx_data = skb_put(skb, tx_len);
11678         memcpy(tx_data, tp->dev->dev_addr, 6);
11679         memset(tx_data + 6, 0x0, 8);
11680
11681         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11682
11683         if (tso_loopback) {
11684                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11685
11686                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11687                               TG3_TSO_TCP_OPT_LEN;
11688
11689                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11690                        sizeof(tg3_tso_header));
11691                 mss = TG3_TSO_MSS;
11692
11693                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11694                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11695
11696                 /* Set the total length field in the IP header */
11697                 iph->tot_len = htons((u16)(mss + hdr_len));
11698
11699                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11700                               TXD_FLAG_CPU_POST_DMA);
11701
11702                 if (tg3_flag(tp, HW_TSO_1) ||
11703                     tg3_flag(tp, HW_TSO_2) ||
11704                     tg3_flag(tp, HW_TSO_3)) {
11705                         struct tcphdr *th;
11706                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11707                         th = (struct tcphdr *)&tx_data[val];
11708                         th->check = 0;
11709                 } else
11710                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11711
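                /* The LSO header length is handed to the chip differently
                 * per TSO generation: HW_TSO_3 scatters hdr_len bits across
                 * the mss field and base_flags, HW_TSO_2 packs it into the
                 * upper mss bits, and the older devices encode only the TCP
                 * option length.
                 */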
11712                 if (tg3_flag(tp, HW_TSO_3)) {
11713                         mss |= (hdr_len & 0xc) << 12;
11714                         if (hdr_len & 0x10)
11715                                 base_flags |= 0x00000010;
11716                         base_flags |= (hdr_len & 0x3e0) << 5;
11717                 } else if (tg3_flag(tp, HW_TSO_2))
11718                         mss |= hdr_len << 9;
11719                 else if (tg3_flag(tp, HW_TSO_1) ||
11720                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11721                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11722                 } else {
11723                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11724                 }
11725
11726                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11727         } else {
11728                 num_pkts = 1;
11729                 data_off = ETH_HLEN;
11730
11731                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11732                     tx_len > VLAN_ETH_FRAME_LEN)
11733                         base_flags |= TXD_FLAG_JMB_PKT;
11734         }
11735
11736         for (i = data_off; i < tx_len; i++)
11737                 tx_data[i] = (u8) (i & 0xff);
11738
11739         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11740         if (pci_dma_mapping_error(tp->pdev, map)) {
11741                 dev_kfree_skb(skb);
11742                 return -EIO;
11743         }
11744
11745         val = tnapi->tx_prod;
11746         tnapi->tx_buffers[val].skb = skb;
11747         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11748
11749         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11750                rnapi->coal_now);
11751
11752         udelay(10);
11753
11754         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11755
11756         budget = tg3_tx_avail(tnapi);
11757         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11758                             base_flags | TXD_FLAG_END, mss, 0)) {
11759                 tnapi->tx_buffers[val].skb = NULL;
11760                 dev_kfree_skb(skb);
11761                 return -EIO;
11762         }
11763
11764         tnapi->tx_prod++;
11765
11766         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11767         tr32_mailbox(tnapi->prodmbox);
11768
11769         udelay(10);
11770
11771         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
11772         for (i = 0; i < 35; i++) {
11773                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11774                        coal_now);
11775
11776                 udelay(10);
11777
11778                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11779                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11780                 if ((tx_idx == tnapi->tx_prod) &&
11781                     (rx_idx == (rx_start_idx + num_pkts)))
11782                         break;
11783         }
11784
11785         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11786         dev_kfree_skb(skb);
11787
11788         if (tx_idx != tnapi->tx_prod)
11789                 goto out;
11790
11791         if (rx_idx != rx_start_idx + num_pkts)
11792                 goto out;
11793
11794         val = data_off;
11795         while (rx_idx != rx_start_idx) {
11796                 desc = &rnapi->rx_rcb[rx_start_idx++];
11797                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11798                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11799
11800                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11801                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11802                         goto out;
11803
11804                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11805                          - ETH_FCS_LEN;
11806
11807                 if (!tso_loopback) {
11808                         if (rx_len != tx_len)
11809                                 goto out;
11810
11811                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11812                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11813                                         goto out;
11814                         } else {
11815                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11816                                         goto out;
11817                         }
11818                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11819                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11820                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11821                         goto out;
11822                 }
11823
11824                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11825                         rx_data = tpr->rx_std_buffers[desc_idx].data;
11826                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11827                                              mapping);
11828                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11829                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11830                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11831                                              mapping);
11832                 } else
11833                         goto out;
11834
11835                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11836                                             PCI_DMA_FROMDEVICE);
11837
11838                 rx_data += TG3_RX_OFFSET(tp);
11839                 for (i = data_off; i < rx_len; i++, val++) {
11840                         if (*(rx_data + i) != (u8) (val & 0xff))
11841                                 goto out;
11842                 }
11843         }
11844
11845         err = 0;
11846
11847         /* tg3_free_rings will unmap and free the rx_data */
11848 out:
11849         return err;
11850 }
11851
11852 #define TG3_STD_LOOPBACK_FAILED         1
11853 #define TG3_JMB_LOOPBACK_FAILED         2
11854 #define TG3_TSO_LOOPBACK_FAILED         4
11855 #define TG3_LOOPBACK_FAILED \
11856         (TG3_STD_LOOPBACK_FAILED | \
11857          TG3_JMB_LOOPBACK_FAILED | \
11858          TG3_TSO_LOOPBACK_FAILED)
11859
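/* tg3_test_loopback() fills one failure bitmask per loopback mode:
 * data[0] for MAC loopback, data[1] for internal PHY loopback, and
 * data[2] for external loopback when do_extlpbk is set.
 */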
11860 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11861 {
11862         int err = -EIO;
11863         u32 eee_cap;
11864         u32 jmb_pkt_sz = 9000;
11865
11866         if (tp->dma_limit)
11867                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11868
11869         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11870         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11871
11872         if (!netif_running(tp->dev)) {
11873                 data[0] = TG3_LOOPBACK_FAILED;
11874                 data[1] = TG3_LOOPBACK_FAILED;
11875                 if (do_extlpbk)
11876                         data[2] = TG3_LOOPBACK_FAILED;
11877                 goto done;
11878         }
11879
11880         err = tg3_reset_hw(tp, 1);
11881         if (err) {
11882                 data[0] = TG3_LOOPBACK_FAILED;
11883                 data[1] = TG3_LOOPBACK_FAILED;
11884                 if (do_extlpbk)
11885                         data[2] = TG3_LOOPBACK_FAILED;
11886                 goto done;
11887         }
11888
11889         if (tg3_flag(tp, ENABLE_RSS)) {
11890                 int i;
11891
11892                 /* Reroute all rx packets to the 1st queue */
11893                 for (i = MAC_RSS_INDIR_TBL_0;
11894                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11895                         tw32(i, 0x0);
11896         }
11897
11898         /* HW erratum - MAC loopback fails in some cases on 5780.
11899          * Normal traffic and PHY loopback are not affected by this
11900          * erratum.  Also, the MAC loopback test is deprecated for
11901          * all newer ASIC revisions.
11902          */
11903         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11904             !tg3_flag(tp, CPMU_PRESENT)) {
11905                 tg3_mac_loopback(tp, true);
11906
11907                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11908                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11909
11910                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11911                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11912                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11913
11914                 tg3_mac_loopback(tp, false);
11915         }
11916
11917         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11918             !tg3_flag(tp, USE_PHYLIB)) {
11919                 int i;
11920
11921                 tg3_phy_lpbk_set(tp, 0, false);
11922
11923                 /* Wait for link */
11924                 for (i = 0; i < 100; i++) {
11925                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11926                                 break;
11927                         mdelay(1);
11928                 }
11929
11930                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11931                         data[1] |= TG3_STD_LOOPBACK_FAILED;
11932                 if (tg3_flag(tp, TSO_CAPABLE) &&
11933                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11934                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
11935                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11936                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11937                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
11938
11939                 if (do_extlpbk) {
11940                         tg3_phy_lpbk_set(tp, 0, true);
11941
11942                         /* All link indications report up, but the hardware
11943                          * isn't really ready for about 20 msec.  Double it
11944                          * to be sure.
11945                          */
11946                         mdelay(40);
11947
11948                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11949                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
11950                         if (tg3_flag(tp, TSO_CAPABLE) &&
11951                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11952                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11953                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11954                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11955                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11956                 }
11957
11958                 /* Re-enable gphy autopowerdown. */
11959                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11960                         tg3_phy_toggle_apd(tp, true);
11961         }
11962
11963         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11964
11965 done:
11966         tp->phy_flags |= eee_cap;
11967
11968         return err;
11969 }
11970
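/* ethtool self-test results: data[0] nvram, data[1] link, data[2]
 * registers, data[3] memory, data[4]-data[6] the loopback bitmasks
 * above, data[7] interrupt.
 */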
11971 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11972                           u64 *data)
11973 {
11974         struct tg3 *tp = netdev_priv(dev);
11975         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11976
11977         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11978             tg3_power_up(tp)) {
11979                 etest->flags |= ETH_TEST_FL_FAILED;
11980                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11981                 return;
11982         }
11983
11984         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11985
11986         if (tg3_test_nvram(tp) != 0) {
11987                 etest->flags |= ETH_TEST_FL_FAILED;
11988                 data[0] = 1;
11989         }
11990         if (!doextlpbk && tg3_test_link(tp)) {
11991                 etest->flags |= ETH_TEST_FL_FAILED;
11992                 data[1] = 1;
11993         }
11994         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11995                 int err, err2 = 0, irq_sync = 0;
11996
11997                 if (netif_running(dev)) {
11998                         tg3_phy_stop(tp);
11999                         tg3_netif_stop(tp);
12000                         irq_sync = 1;
12001                 }
12002
12003                 tg3_full_lock(tp, irq_sync);
12004
12005                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12006                 err = tg3_nvram_lock(tp);
12007                 tg3_halt_cpu(tp, RX_CPU_BASE);
12008                 if (!tg3_flag(tp, 5705_PLUS))
12009                         tg3_halt_cpu(tp, TX_CPU_BASE);
12010                 if (!err)
12011                         tg3_nvram_unlock(tp);
12012
12013                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12014                         tg3_phy_reset(tp);
12015
12016                 if (tg3_test_registers(tp) != 0) {
12017                         etest->flags |= ETH_TEST_FL_FAILED;
12018                         data[2] = 1;
12019                 }
12020
12021                 if (tg3_test_memory(tp) != 0) {
12022                         etest->flags |= ETH_TEST_FL_FAILED;
12023                         data[3] = 1;
12024                 }
12025
12026                 if (doextlpbk)
12027                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12028
12029                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12030                         etest->flags |= ETH_TEST_FL_FAILED;
12031
12032                 tg3_full_unlock(tp);
12033
12034                 if (tg3_test_interrupt(tp) != 0) {
12035                         etest->flags |= ETH_TEST_FL_FAILED;
12036                         data[7] = 1;
12037                 }
12038
12039                 tg3_full_lock(tp, 0);
12040
12041                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12042                 if (netif_running(dev)) {
12043                         tg3_flag_set(tp, INIT_COMPLETE);
12044                         err2 = tg3_restart_hw(tp, 1);
12045                         if (!err2)
12046                                 tg3_netif_start(tp);
12047                 }
12048
12049                 tg3_full_unlock(tp);
12050
12051                 if (irq_sync && !err2)
12052                         tg3_phy_start(tp);
12053         }
12054         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12055                 tg3_power_down(tp);
12056
12057 }
12058
12059 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12060 {
12061         struct mii_ioctl_data *data = if_mii(ifr);
12062         struct tg3 *tp = netdev_priv(dev);
12063         int err;
12064
12065         if (tg3_flag(tp, USE_PHYLIB)) {
12066                 struct phy_device *phydev;
12067                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12068                         return -EAGAIN;
12069                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12070                 return phy_mii_ioctl(phydev, ifr, cmd);
12071         }
12072
12073         switch (cmd) {
12074         case SIOCGMIIPHY:
12075                 data->phy_id = tp->phy_addr;
12076
12077                 /* fallthru */
12078         case SIOCGMIIREG: {
12079                 u32 mii_regval;
12080
12081                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12082                         break;                  /* We have no PHY */
12083
12084                 if (!netif_running(dev))
12085                         return -EAGAIN;
12086
12087                 spin_lock_bh(&tp->lock);
12088                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12089                 spin_unlock_bh(&tp->lock);
12090
12091                 data->val_out = mii_regval;
12092
12093                 return err;
12094         }
12095
12096         case SIOCSMIIREG:
12097                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12098                         break;                  /* We have no PHY */
12099
12100                 if (!netif_running(dev))
12101                         return -EAGAIN;
12102
12103                 spin_lock_bh(&tp->lock);
12104                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12105                 spin_unlock_bh(&tp->lock);
12106
12107                 return err;
12108
12109         default:
12110                 /* do nothing */
12111                 break;
12112         }
12113         return -EOPNOTSUPP;
12114 }
12115
12116 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12117 {
12118         struct tg3 *tp = netdev_priv(dev);
12119
12120         memcpy(ec, &tp->coal, sizeof(*ec));
12121         return 0;
12122 }
12123
12124 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12125 {
12126         struct tg3 *tp = netdev_priv(dev);
12127         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12128         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12129
12130         if (!tg3_flag(tp, 5705_PLUS)) {
12131                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12132                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12133                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12134                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12135         }
12136
12137         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12138             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12139             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12140             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12141             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12142             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12143             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12144             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12145             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12146             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12147                 return -EINVAL;
12148
12149         /* No rx interrupts will be generated if both are zero */
12150         if ((ec->rx_coalesce_usecs == 0) &&
12151             (ec->rx_max_coalesced_frames == 0))
12152                 return -EINVAL;
12153
12154         /* No tx interrupts will be generated if both are zero */
12155         if ((ec->tx_coalesce_usecs == 0) &&
12156             (ec->tx_max_coalesced_frames == 0))
12157                 return -EINVAL;
12158
12159         /* Only copy relevant parameters, ignore all others. */
12160         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12161         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12162         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12163         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12164         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12165         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12166         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12167         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12168         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12169
12170         if (netif_running(dev)) {
12171                 tg3_full_lock(tp, 0);
12172                 __tg3_set_coalesce(tp, &tp->coal);
12173                 tg3_full_unlock(tp);
12174         }
12175         return 0;
12176 }
12177
12178 static const struct ethtool_ops tg3_ethtool_ops = {
12179         .get_settings           = tg3_get_settings,
12180         .set_settings           = tg3_set_settings,
12181         .get_drvinfo            = tg3_get_drvinfo,
12182         .get_regs_len           = tg3_get_regs_len,
12183         .get_regs               = tg3_get_regs,
12184         .get_wol                = tg3_get_wol,
12185         .set_wol                = tg3_set_wol,
12186         .get_msglevel           = tg3_get_msglevel,
12187         .set_msglevel           = tg3_set_msglevel,
12188         .nway_reset             = tg3_nway_reset,
12189         .get_link               = ethtool_op_get_link,
12190         .get_eeprom_len         = tg3_get_eeprom_len,
12191         .get_eeprom             = tg3_get_eeprom,
12192         .set_eeprom             = tg3_set_eeprom,
12193         .get_ringparam          = tg3_get_ringparam,
12194         .set_ringparam          = tg3_set_ringparam,
12195         .get_pauseparam         = tg3_get_pauseparam,
12196         .set_pauseparam         = tg3_set_pauseparam,
12197         .self_test              = tg3_self_test,
12198         .get_strings            = tg3_get_strings,
12199         .set_phys_id            = tg3_set_phys_id,
12200         .get_ethtool_stats      = tg3_get_ethtool_stats,
12201         .get_coalesce           = tg3_get_coalesce,
12202         .set_coalesce           = tg3_set_coalesce,
12203         .get_sset_count         = tg3_get_sset_count,
12204         .get_rxnfc              = tg3_get_rxnfc,
12205         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12206         .get_rxfh_indir         = tg3_get_rxfh_indir,
12207         .set_rxfh_indir         = tg3_set_rxfh_indir,
12208 };
12209
12210 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12211                                                 struct rtnl_link_stats64 *stats)
12212 {
12213         struct tg3 *tp = netdev_priv(dev);
12214
12215         if (!tp->hw_stats)
12216                 return &tp->net_stats_prev;
12217
12218         spin_lock_bh(&tp->lock);
12219         tg3_get_nstats(tp, stats);
12220         spin_unlock_bh(&tp->lock);
12221
12222         return stats;
12223 }
12224
12225 static void tg3_set_rx_mode(struct net_device *dev)
12226 {
12227         struct tg3 *tp = netdev_priv(dev);
12228
12229         if (!netif_running(dev))
12230                 return;
12231
12232         tg3_full_lock(tp, 0);
12233         __tg3_set_rx_mode(dev);
12234         tg3_full_unlock(tp);
12235 }
12236
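/* On 5780-class devices jumbo frames and TSO cannot be enabled at the
 * same time, so TSO_CAPABLE is toggled as the MTU crosses ETH_DATA_LEN;
 * everything else simply switches the jumbo ring on or off.
 */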
12237 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12238                                int new_mtu)
12239 {
12240         dev->mtu = new_mtu;
12241
12242         if (new_mtu > ETH_DATA_LEN) {
12243                 if (tg3_flag(tp, 5780_CLASS)) {
12244                         netdev_update_features(dev);
12245                         tg3_flag_clear(tp, TSO_CAPABLE);
12246                 } else {
12247                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12248                 }
12249         } else {
12250                 if (tg3_flag(tp, 5780_CLASS)) {
12251                         tg3_flag_set(tp, TSO_CAPABLE);
12252                         netdev_update_features(dev);
12253                 }
12254                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12255         }
12256 }
12257
12258 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12259 {
12260         struct tg3 *tp = netdev_priv(dev);
12261         int err;
12262
12263         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12264                 return -EINVAL;
12265
12266         if (!netif_running(dev)) {
12267                 /* We'll just catch it later when the
12268                  * device is brought up.
12269                  */
12270                 tg3_set_mtu(dev, tp, new_mtu);
12271                 return 0;
12272         }
12273
12274         tg3_phy_stop(tp);
12275
12276         tg3_netif_stop(tp);
12277
12278         tg3_full_lock(tp, 1);
12279
12280         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12281
12282         tg3_set_mtu(dev, tp, new_mtu);
12283
12284         err = tg3_restart_hw(tp, 0);
12285
12286         if (!err)
12287                 tg3_netif_start(tp);
12288
12289         tg3_full_unlock(tp);
12290
12291         if (!err)
12292                 tg3_phy_start(tp);
12293
12294         return err;
12295 }
12296
12297 static const struct net_device_ops tg3_netdev_ops = {
12298         .ndo_open               = tg3_open,
12299         .ndo_stop               = tg3_close,
12300         .ndo_start_xmit         = tg3_start_xmit,
12301         .ndo_get_stats64        = tg3_get_stats64,
12302         .ndo_validate_addr      = eth_validate_addr,
12303         .ndo_set_rx_mode        = tg3_set_rx_mode,
12304         .ndo_set_mac_address    = tg3_set_mac_addr,
12305         .ndo_do_ioctl           = tg3_ioctl,
12306         .ndo_tx_timeout         = tg3_tx_timeout,
12307         .ndo_change_mtu         = tg3_change_mtu,
12308         .ndo_fix_features       = tg3_fix_features,
12309         .ndo_set_features       = tg3_set_features,
12310 #ifdef CONFIG_NET_POLL_CONTROLLER
12311         .ndo_poll_controller    = tg3_poll_controller,
12312 #endif
12313 };
12314
12315 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12316 {
12317         u32 cursize, val, magic;
12318
12319         tp->nvram_size = EEPROM_CHIP_SIZE;
12320
12321         if (tg3_nvram_read(tp, 0, &magic) != 0)
12322                 return;
12323
12324         if ((magic != TG3_EEPROM_MAGIC) &&
12325             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12326             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12327                 return;
12328
12329         /*
12330          * Size the chip by reading offsets at increasing powers of two.
12331          * When we encounter our validation signature, we know the addressing
12332          * has wrapped around, and thus have our chip size.
12333          */
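        /* For example, on a hypothetical 512-byte part the reads at 0x10,
         * 0x20, ..., 0x100 return ordinary data, but the read at 0x200
         * wraps back to offset 0 and returns the magic signature again,
         * so cursize (and hence nvram_size) ends up as 0x200.
         */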
12334         cursize = 0x10;
12335
12336         while (cursize < tp->nvram_size) {
12337                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12338                         return;
12339
12340                 if (val == magic)
12341                         break;
12342
12343                 cursize <<= 1;
12344         }
12345
12346         tp->nvram_size = cursize;
12347 }
12348
12349 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12350 {
12351         u32 val;
12352
12353         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12354                 return;
12355
12356         /* Selfboot format */
12357         if (val != TG3_EEPROM_MAGIC) {
12358                 tg3_get_eeprom_size(tp);
12359                 return;
12360         }
12361
12362         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12363                 if (val != 0) {
12364                         /* This is confusing.  We want to operate on the
12365                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12366                          * call will read from NVRAM and byteswap the data
12367                          * according to the byteswapping settings for all
12368                          * other register accesses.  This ensures the data we
12369                          * want will always reside in the lower 16-bits.
12370                          * However, the data in NVRAM is in LE format, which
12371                          * means the data from the NVRAM read will always be
12372                          * opposite the endianness of the CPU.  The 16-bit
12373                          * byteswap then brings the data to CPU endianness.
12374                          */
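                        /* The halfword presumably holds the size in KB;
                         * e.g. a 512 KB part stores 0x0200, and after the
                         * swab the driver computes 512 * 1024 bytes on any
                         * host endianness.
                         */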
12375                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12376                         return;
12377                 }
12378         }
12379         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12380 }
12381
12382 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12383 {
12384         u32 nvcfg1;
12385
12386         nvcfg1 = tr32(NVRAM_CFG1);
12387         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12388                 tg3_flag_set(tp, FLASH);
12389         } else {
12390                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12391                 tw32(NVRAM_CFG1, nvcfg1);
12392         }
12393
12394         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12395             tg3_flag(tp, 5780_CLASS)) {
12396                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12397                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12398                         tp->nvram_jedecnum = JEDEC_ATMEL;
12399                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12400                         tg3_flag_set(tp, NVRAM_BUFFERED);
12401                         break;
12402                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12403                         tp->nvram_jedecnum = JEDEC_ATMEL;
12404                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12405                         break;
12406                 case FLASH_VENDOR_ATMEL_EEPROM:
12407                         tp->nvram_jedecnum = JEDEC_ATMEL;
12408                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12409                         tg3_flag_set(tp, NVRAM_BUFFERED);
12410                         break;
12411                 case FLASH_VENDOR_ST:
12412                         tp->nvram_jedecnum = JEDEC_ST;
12413                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12414                         tg3_flag_set(tp, NVRAM_BUFFERED);
12415                         break;
12416                 case FLASH_VENDOR_SAIFUN:
12417                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12418                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12419                         break;
12420                 case FLASH_VENDOR_SST_SMALL:
12421                 case FLASH_VENDOR_SST_LARGE:
12422                         tp->nvram_jedecnum = JEDEC_SST;
12423                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12424                         break;
12425                 }
12426         } else {
12427                 tp->nvram_jedecnum = JEDEC_ATMEL;
12428                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12429                 tg3_flag_set(tp, NVRAM_BUFFERED);
12430         }
12431 }
12432
12433 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12434 {
12435         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12436         case FLASH_5752PAGE_SIZE_256:
12437                 tp->nvram_pagesize = 256;
12438                 break;
12439         case FLASH_5752PAGE_SIZE_512:
12440                 tp->nvram_pagesize = 512;
12441                 break;
12442         case FLASH_5752PAGE_SIZE_1K:
12443                 tp->nvram_pagesize = 1024;
12444                 break;
12445         case FLASH_5752PAGE_SIZE_2K:
12446                 tp->nvram_pagesize = 2048;
12447                 break;
12448         case FLASH_5752PAGE_SIZE_4K:
12449                 tp->nvram_pagesize = 4096;
12450                 break;
12451         case FLASH_5752PAGE_SIZE_264:
12452                 tp->nvram_pagesize = 264;
12453                 break;
12454         case FLASH_5752PAGE_SIZE_528:
12455                 tp->nvram_pagesize = 528;
12456                 break;
12457         }
12458 }
12459
12460 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12461 {
12462         u32 nvcfg1;
12463
12464         nvcfg1 = tr32(NVRAM_CFG1);
12465
12466         /* NVRAM protection for TPM */
12467         if (nvcfg1 & (1 << 27))
12468                 tg3_flag_set(tp, PROTECTED_NVRAM);
12469
12470         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12471         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12472         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12473                 tp->nvram_jedecnum = JEDEC_ATMEL;
12474                 tg3_flag_set(tp, NVRAM_BUFFERED);
12475                 break;
12476         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12477                 tp->nvram_jedecnum = JEDEC_ATMEL;
12478                 tg3_flag_set(tp, NVRAM_BUFFERED);
12479                 tg3_flag_set(tp, FLASH);
12480                 break;
12481         case FLASH_5752VENDOR_ST_M45PE10:
12482         case FLASH_5752VENDOR_ST_M45PE20:
12483         case FLASH_5752VENDOR_ST_M45PE40:
12484                 tp->nvram_jedecnum = JEDEC_ST;
12485                 tg3_flag_set(tp, NVRAM_BUFFERED);
12486                 tg3_flag_set(tp, FLASH);
12487                 break;
12488         }
12489
12490         if (tg3_flag(tp, FLASH)) {
12491                 tg3_nvram_get_pagesize(tp, nvcfg1);
12492         } else {
12493                 /* For eeprom, set pagesize to maximum eeprom size */
12494                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12495
12496                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12497                 tw32(NVRAM_CFG1, nvcfg1);
12498         }
12499 }
12500
12501 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12502 {
12503         u32 nvcfg1, protect = 0;
12504
12505         nvcfg1 = tr32(NVRAM_CFG1);
12506
12507         /* NVRAM protection for TPM */
12508         if (nvcfg1 & (1 << 27)) {
12509                 tg3_flag_set(tp, PROTECTED_NVRAM);
12510                 protect = 1;
12511         }
12512
12513         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12514         switch (nvcfg1) {
12515         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12516         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12517         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12518         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12519                 tp->nvram_jedecnum = JEDEC_ATMEL;
12520                 tg3_flag_set(tp, NVRAM_BUFFERED);
12521                 tg3_flag_set(tp, FLASH);
12522                 tp->nvram_pagesize = 264;
12523                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12524                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12525                         tp->nvram_size = (protect ? 0x3e200 :
12526                                           TG3_NVRAM_SIZE_512KB);
12527                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12528                         tp->nvram_size = (protect ? 0x1f200 :
12529                                           TG3_NVRAM_SIZE_256KB);
12530                 else
12531                         tp->nvram_size = (protect ? 0x1f200 :
12532                                           TG3_NVRAM_SIZE_128KB);
12533                 break;
12534         case FLASH_5752VENDOR_ST_M45PE10:
12535         case FLASH_5752VENDOR_ST_M45PE20:
12536         case FLASH_5752VENDOR_ST_M45PE40:
12537                 tp->nvram_jedecnum = JEDEC_ST;
12538                 tg3_flag_set(tp, NVRAM_BUFFERED);
12539                 tg3_flag_set(tp, FLASH);
12540                 tp->nvram_pagesize = 256;
12541                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12542                         tp->nvram_size = (protect ?
12543                                           TG3_NVRAM_SIZE_64KB :
12544                                           TG3_NVRAM_SIZE_128KB);
12545                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12546                         tp->nvram_size = (protect ?
12547                                           TG3_NVRAM_SIZE_64KB :
12548                                           TG3_NVRAM_SIZE_256KB);
12549                 else
12550                         tp->nvram_size = (protect ?
12551                                           TG3_NVRAM_SIZE_128KB :
12552                                           TG3_NVRAM_SIZE_512KB);
12553                 break;
12554         }
12555 }
12556
12557 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12558 {
12559         u32 nvcfg1;
12560
12561         nvcfg1 = tr32(NVRAM_CFG1);
12562
12563         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12564         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12565         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12566         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12567         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12568                 tp->nvram_jedecnum = JEDEC_ATMEL;
12569                 tg3_flag_set(tp, NVRAM_BUFFERED);
12570                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12571
12572                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12573                 tw32(NVRAM_CFG1, nvcfg1);
12574                 break;
12575         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12576         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12577         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12578         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12579                 tp->nvram_jedecnum = JEDEC_ATMEL;
12580                 tg3_flag_set(tp, NVRAM_BUFFERED);
12581                 tg3_flag_set(tp, FLASH);
12582                 tp->nvram_pagesize = 264;
12583                 break;
12584         case FLASH_5752VENDOR_ST_M45PE10:
12585         case FLASH_5752VENDOR_ST_M45PE20:
12586         case FLASH_5752VENDOR_ST_M45PE40:
12587                 tp->nvram_jedecnum = JEDEC_ST;
12588                 tg3_flag_set(tp, NVRAM_BUFFERED);
12589                 tg3_flag_set(tp, FLASH);
12590                 tp->nvram_pagesize = 256;
12591                 break;
12592         }
12593 }
12594
12595 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12596 {
12597         u32 nvcfg1, protect = 0;
12598
12599         nvcfg1 = tr32(NVRAM_CFG1);
12600
12601         /* NVRAM protection for TPM */
12602         if (nvcfg1 & (1 << 27)) {
12603                 tg3_flag_set(tp, PROTECTED_NVRAM);
12604                 protect = 1;
12605         }
12606
12607         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12608         switch (nvcfg1) {
12609         case FLASH_5761VENDOR_ATMEL_ADB021D:
12610         case FLASH_5761VENDOR_ATMEL_ADB041D:
12611         case FLASH_5761VENDOR_ATMEL_ADB081D:
12612         case FLASH_5761VENDOR_ATMEL_ADB161D:
12613         case FLASH_5761VENDOR_ATMEL_MDB021D:
12614         case FLASH_5761VENDOR_ATMEL_MDB041D:
12615         case FLASH_5761VENDOR_ATMEL_MDB081D:
12616         case FLASH_5761VENDOR_ATMEL_MDB161D:
12617                 tp->nvram_jedecnum = JEDEC_ATMEL;
12618                 tg3_flag_set(tp, NVRAM_BUFFERED);
12619                 tg3_flag_set(tp, FLASH);
12620                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12621                 tp->nvram_pagesize = 256;
12622                 break;
12623         case FLASH_5761VENDOR_ST_A_M45PE20:
12624         case FLASH_5761VENDOR_ST_A_M45PE40:
12625         case FLASH_5761VENDOR_ST_A_M45PE80:
12626         case FLASH_5761VENDOR_ST_A_M45PE16:
12627         case FLASH_5761VENDOR_ST_M_M45PE20:
12628         case FLASH_5761VENDOR_ST_M_M45PE40:
12629         case FLASH_5761VENDOR_ST_M_M45PE80:
12630         case FLASH_5761VENDOR_ST_M_M45PE16:
12631                 tp->nvram_jedecnum = JEDEC_ST;
12632                 tg3_flag_set(tp, NVRAM_BUFFERED);
12633                 tg3_flag_set(tp, FLASH);
12634                 tp->nvram_pagesize = 256;
12635                 break;
12636         }
12637
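        /* With protection on, NVRAM_ADDR_LOCKOUT appears to hold the first
         * locked-out address, which bounds the usable NVRAM size.
         */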
12638         if (protect) {
12639                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12640         } else {
12641                 switch (nvcfg1) {
12642                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12643                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12644                 case FLASH_5761VENDOR_ST_A_M45PE16:
12645                 case FLASH_5761VENDOR_ST_M_M45PE16:
12646                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12647                         break;
12648                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12649                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12650                 case FLASH_5761VENDOR_ST_A_M45PE80:
12651                 case FLASH_5761VENDOR_ST_M_M45PE80:
12652                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12653                         break;
12654                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12655                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12656                 case FLASH_5761VENDOR_ST_A_M45PE40:
12657                 case FLASH_5761VENDOR_ST_M_M45PE40:
12658                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12659                         break;
12660                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12661                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12662                 case FLASH_5761VENDOR_ST_A_M45PE20:
12663                 case FLASH_5761VENDOR_ST_M_M45PE20:
12664                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12665                         break;
12666                 }
12667         }
12668 }
12669
12670 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12671 {
12672         tp->nvram_jedecnum = JEDEC_ATMEL;
12673         tg3_flag_set(tp, NVRAM_BUFFERED);
12674         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12675 }
12676
12677 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12678 {
12679         u32 nvcfg1;
12680
12681         nvcfg1 = tr32(NVRAM_CFG1);
12682
12683         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12684         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12685         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12686                 tp->nvram_jedecnum = JEDEC_ATMEL;
12687                 tg3_flag_set(tp, NVRAM_BUFFERED);
12688                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12689
12690                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12691                 tw32(NVRAM_CFG1, nvcfg1);
12692                 return;
12693         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12694         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12695         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12696         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12697         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12698         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12699         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12700                 tp->nvram_jedecnum = JEDEC_ATMEL;
12701                 tg3_flag_set(tp, NVRAM_BUFFERED);
12702                 tg3_flag_set(tp, FLASH);
12703
12704                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12705                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12706                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12707                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12708                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12709                         break;
12710                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12711                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12712                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12713                         break;
12714                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12715                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12716                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12717                         break;
12718                 }
12719                 break;
12720         case FLASH_5752VENDOR_ST_M45PE10:
12721         case FLASH_5752VENDOR_ST_M45PE20:
12722         case FLASH_5752VENDOR_ST_M45PE40:
12723                 tp->nvram_jedecnum = JEDEC_ST;
12724                 tg3_flag_set(tp, NVRAM_BUFFERED);
12725                 tg3_flag_set(tp, FLASH);
12726
12727                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12728                 case FLASH_5752VENDOR_ST_M45PE10:
12729                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12730                         break;
12731                 case FLASH_5752VENDOR_ST_M45PE20:
12732                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12733                         break;
12734                 case FLASH_5752VENDOR_ST_M45PE40:
12735                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12736                         break;
12737                 }
12738                 break;
12739         default:
12740                 tg3_flag_set(tp, NO_NVRAM);
12741                 return;
12742         }
12743
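        /* Atmel AT45DB-style DataFlash uses 264- or 528-byte pages that need
         * page/byte address translation; any other page size is addressed
         * linearly.
         */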
12744         tg3_nvram_get_pagesize(tp, nvcfg1);
12745         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12746                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12747 }
12748
12749
12750 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12751 {
12752         u32 nvcfg1;
12753
12754         nvcfg1 = tr32(NVRAM_CFG1);
12755
12756         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12757         case FLASH_5717VENDOR_ATMEL_EEPROM:
12758         case FLASH_5717VENDOR_MICRO_EEPROM:
12759                 tp->nvram_jedecnum = JEDEC_ATMEL;
12760                 tg3_flag_set(tp, NVRAM_BUFFERED);
12761                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12762
12763                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12764                 tw32(NVRAM_CFG1, nvcfg1);
12765                 return;
12766         case FLASH_5717VENDOR_ATMEL_MDB011D:
12767         case FLASH_5717VENDOR_ATMEL_ADB011B:
12768         case FLASH_5717VENDOR_ATMEL_ADB011D:
12769         case FLASH_5717VENDOR_ATMEL_MDB021D:
12770         case FLASH_5717VENDOR_ATMEL_ADB021B:
12771         case FLASH_5717VENDOR_ATMEL_ADB021D:
12772         case FLASH_5717VENDOR_ATMEL_45USPT:
12773                 tp->nvram_jedecnum = JEDEC_ATMEL;
12774                 tg3_flag_set(tp, NVRAM_BUFFERED);
12775                 tg3_flag_set(tp, FLASH);
12776
12777                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12778                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12779                         /* Detect size with tg3_nvram_get_size() */
12780                         break;
12781                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12782                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12783                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12784                         break;
12785                 default:
12786                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12787                         break;
12788                 }
12789                 break;
12790         case FLASH_5717VENDOR_ST_M_M25PE10:
12791         case FLASH_5717VENDOR_ST_A_M25PE10:
12792         case FLASH_5717VENDOR_ST_M_M45PE10:
12793         case FLASH_5717VENDOR_ST_A_M45PE10:
12794         case FLASH_5717VENDOR_ST_M_M25PE20:
12795         case FLASH_5717VENDOR_ST_A_M25PE20:
12796         case FLASH_5717VENDOR_ST_M_M45PE20:
12797         case FLASH_5717VENDOR_ST_A_M45PE20:
12798         case FLASH_5717VENDOR_ST_25USPT:
12799         case FLASH_5717VENDOR_ST_45USPT:
12800                 tp->nvram_jedecnum = JEDEC_ST;
12801                 tg3_flag_set(tp, NVRAM_BUFFERED);
12802                 tg3_flag_set(tp, FLASH);
12803
12804                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12805                 case FLASH_5717VENDOR_ST_M_M25PE20:
12806                 case FLASH_5717VENDOR_ST_M_M45PE20:
12807                         /* Detect size with tg3_nvram_get_size() */
12808                         break;
12809                 case FLASH_5717VENDOR_ST_A_M25PE20:
12810                 case FLASH_5717VENDOR_ST_A_M45PE20:
12811                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12812                         break;
12813                 default:
12814                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12815                         break;
12816                 }
12817                 break;
12818         default:
12819                 tg3_flag_set(tp, NO_NVRAM);
12820                 return;
12821         }
12822
12823         tg3_nvram_get_pagesize(tp, nvcfg1);
12824         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12825                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12826 }
12827
12828 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12829 {
12830         u32 nvcfg1, nvmpinstrp;
12831
12832         nvcfg1 = tr32(NVRAM_CFG1);
12833         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12834
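        /* nvmpinstrp (presumably "NVRAM pin strap") identifies which flash
         * part the board is wired with.
         */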
12835         switch (nvmpinstrp) {
12836         case FLASH_5720_EEPROM_HD:
12837         case FLASH_5720_EEPROM_LD:
12838                 tp->nvram_jedecnum = JEDEC_ATMEL;
12839                 tg3_flag_set(tp, NVRAM_BUFFERED);
12840
12841                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12842                 tw32(NVRAM_CFG1, nvcfg1);
12843                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12844                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12845                 else
12846                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12847                 return;
12848         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12849         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12850         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12851         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12852         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12853         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12854         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12855         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12856         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12857         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12858         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12859         case FLASH_5720VENDOR_ATMEL_45USPT:
12860                 tp->nvram_jedecnum = JEDEC_ATMEL;
12861                 tg3_flag_set(tp, NVRAM_BUFFERED);
12862                 tg3_flag_set(tp, FLASH);
12863
12864                 switch (nvmpinstrp) {
12865                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12866                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12867                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12868                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12869                         break;
12870                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12871                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12872                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12873                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12874                         break;
12875                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12876                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12877                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12878                         break;
12879                 default:
12880                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12881                         break;
12882                 }
12883                 break;
12884         case FLASH_5720VENDOR_M_ST_M25PE10:
12885         case FLASH_5720VENDOR_M_ST_M45PE10:
12886         case FLASH_5720VENDOR_A_ST_M25PE10:
12887         case FLASH_5720VENDOR_A_ST_M45PE10:
12888         case FLASH_5720VENDOR_M_ST_M25PE20:
12889         case FLASH_5720VENDOR_M_ST_M45PE20:
12890         case FLASH_5720VENDOR_A_ST_M25PE20:
12891         case FLASH_5720VENDOR_A_ST_M45PE20:
12892         case FLASH_5720VENDOR_M_ST_M25PE40:
12893         case FLASH_5720VENDOR_M_ST_M45PE40:
12894         case FLASH_5720VENDOR_A_ST_M25PE40:
12895         case FLASH_5720VENDOR_A_ST_M45PE40:
12896         case FLASH_5720VENDOR_M_ST_M25PE80:
12897         case FLASH_5720VENDOR_M_ST_M45PE80:
12898         case FLASH_5720VENDOR_A_ST_M25PE80:
12899         case FLASH_5720VENDOR_A_ST_M45PE80:
12900         case FLASH_5720VENDOR_ST_25USPT:
12901         case FLASH_5720VENDOR_ST_45USPT:
12902                 tp->nvram_jedecnum = JEDEC_ST;
12903                 tg3_flag_set(tp, NVRAM_BUFFERED);
12904                 tg3_flag_set(tp, FLASH);
12905
12906                 switch (nvmpinstrp) {
12907                 case FLASH_5720VENDOR_M_ST_M25PE20:
12908                 case FLASH_5720VENDOR_M_ST_M45PE20:
12909                 case FLASH_5720VENDOR_A_ST_M25PE20:
12910                 case FLASH_5720VENDOR_A_ST_M45PE20:
12911                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12912                         break;
12913                 case FLASH_5720VENDOR_M_ST_M25PE40:
12914                 case FLASH_5720VENDOR_M_ST_M45PE40:
12915                 case FLASH_5720VENDOR_A_ST_M25PE40:
12916                 case FLASH_5720VENDOR_A_ST_M45PE40:
12917                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12918                         break;
12919                 case FLASH_5720VENDOR_M_ST_M25PE80:
12920                 case FLASH_5720VENDOR_M_ST_M45PE80:
12921                 case FLASH_5720VENDOR_A_ST_M25PE80:
12922                 case FLASH_5720VENDOR_A_ST_M45PE80:
12923                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12924                         break;
12925                 default:
12926                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12927                         break;
12928                 }
12929                 break;
12930         default:
12931                 tg3_flag_set(tp, NO_NVRAM);
12932                 return;
12933         }
12934
12935         tg3_nvram_get_pagesize(tp, nvcfg1);
12936         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12937                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12938 }
12939
12940 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12941 static void __devinit tg3_nvram_init(struct tg3 *tp)
12942 {
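        /* Reset the EEPROM access state machine and program the default
         * serial clock period before touching NVRAM.
         */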
12943         tw32_f(GRC_EEPROM_ADDR,
12944              (EEPROM_ADDR_FSM_RESET |
12945               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12946                EEPROM_ADDR_CLKPERD_SHIFT)));
12947
12948         msleep(1);
12949
12950         /* Enable serial EEPROM (seeprom) accesses. */
12951         tw32_f(GRC_LOCAL_CTRL,
12952              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12953         udelay(100);
12954
12955         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12956             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12957                 tg3_flag_set(tp, NVRAM);
12958
12959                 if (tg3_nvram_lock(tp)) {
12960                         netdev_warn(tp->dev,
12961                                     "Cannot get nvram lock, %s failed\n",
12962                                     __func__);
12963                         return;
12964                 }
12965                 tg3_enable_nvram_access(tp);
12966
12967                 tp->nvram_size = 0;
12968
12969                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12970                         tg3_get_5752_nvram_info(tp);
12971                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12972                         tg3_get_5755_nvram_info(tp);
12973                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12974                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12975                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12976                         tg3_get_5787_nvram_info(tp);
12977                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12978                         tg3_get_5761_nvram_info(tp);
12979                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12980                         tg3_get_5906_nvram_info(tp);
12981                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12982                          tg3_flag(tp, 57765_CLASS))
12983                         tg3_get_57780_nvram_info(tp);
12984                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12985                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12986                         tg3_get_5717_nvram_info(tp);
12987                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12988                         tg3_get_5720_nvram_info(tp);
12989                 else
12990                         tg3_get_nvram_info(tp);
12991
12992                 if (tp->nvram_size == 0)
12993                         tg3_get_nvram_size(tp);
12994
12995                 tg3_disable_nvram_access(tp);
12996                 tg3_nvram_unlock(tp);
12997
12998         } else {
12999                 tg3_flag_clear(tp, NVRAM);
13000                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13001
13002                 tg3_get_eeprom_size(tp);
13003         }
13004 }
13005
13006 struct subsys_tbl_ent {
13007         u16 subsys_vendor, subsys_devid;
13008         u32 phy_id;
13009 };
13010
13011 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13012         /* Broadcom boards. */
13013         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13014           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13015         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13016           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13017         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13018           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13019         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13020           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13021         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13022           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13023         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13024           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13025         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13026           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13027         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13028           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13029         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13030           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13031         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13032           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13033         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13034           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13035
13036         /* 3com boards. */
13037         { TG3PCI_SUBVENDOR_ID_3COM,
13038           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13039         { TG3PCI_SUBVENDOR_ID_3COM,
13040           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13041         { TG3PCI_SUBVENDOR_ID_3COM,
13042           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13043         { TG3PCI_SUBVENDOR_ID_3COM,
13044           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13045         { TG3PCI_SUBVENDOR_ID_3COM,
13046           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13047
13048         /* DELL boards. */
13049         { TG3PCI_SUBVENDOR_ID_DELL,
13050           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13051         { TG3PCI_SUBVENDOR_ID_DELL,
13052           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13053         { TG3PCI_SUBVENDOR_ID_DELL,
13054           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13055         { TG3PCI_SUBVENDOR_ID_DELL,
13056           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13057
13058         /* Compaq boards. */
13059         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13060           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13061         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13062           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13063         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13064           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13065         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13066           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13067         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13068           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13069
13070         /* IBM boards. */
13071         { TG3PCI_SUBVENDOR_ID_IBM,
13072           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13073 };
13074
13075 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13076 {
13077         int i;
13078
13079         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13080                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13081                      tp->pdev->subsystem_vendor) &&
13082                     (subsys_id_to_phy_id[i].subsys_devid ==
13083                      tp->pdev->subsystem_device))
13084                         return &subsys_id_to_phy_id[i];
13085         }
13086         return NULL;
13087 }
13088
13089 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13090 {
13091         u32 val;
13092
13093         tp->phy_id = TG3_PHY_ID_INVALID;
13094         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13095
13096         /* Assume the device is onboard and WOL-capable by default. */
13097         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13098         tg3_flag_set(tp, WOL_CAP);
13099
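        /* The 5906 takes a different path: its bootcode exposes the
         * equivalent configuration bits through the VCPU config shadow
         * register rather than through NIC SRAM.
         */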
13100         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13101                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13102                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13103                         tg3_flag_set(tp, IS_NIC);
13104                 }
13105                 val = tr32(VCPU_CFGSHDW);
13106                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13107                         tg3_flag_set(tp, ASPM_WORKAROUND);
13108                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13109                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13110                         tg3_flag_set(tp, WOL_ENABLE);
13111                         device_set_wakeup_enable(&tp->pdev->dev, true);
13112                 }
13113                 goto done;
13114         }
13115
13116         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13117         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13118                 u32 nic_cfg, led_cfg;
13119                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13120                 int eeprom_phy_serdes = 0;
13121
13122                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13123                 tp->nic_sram_data_cfg = nic_cfg;
13124
13125                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13126                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13127                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13128                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13129                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13130                     (ver > 0) && (ver < 0x100))
13131                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13132
13133                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13134                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13135
13136                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13137                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13138                         eeprom_phy_serdes = 1;
13139
13140                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13141                 if (nic_phy_id != 0) {
13142                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13143                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13144
13145                         eeprom_phy_id  = (id1 >> 16) << 10;
13146                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13147                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13148                 } else
13149                         eeprom_phy_id = 0;
13150
13151                 tp->phy_id = eeprom_phy_id;
13152                 if (eeprom_phy_serdes) {
13153                         if (!tg3_flag(tp, 5705_PLUS))
13154                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13155                         else
13156                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13157                 }
13158
13159                 if (tg3_flag(tp, 5750_PLUS))
13160                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13161                                     SHASTA_EXT_LED_MODE_MASK);
13162                 else
13163                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13164
13165                 switch (led_cfg) {
13166                 default:
13167                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13168                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13169                         break;
13170
13171                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13172                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13173                         break;
13174
13175                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13176                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13177
13178                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
13179                          * as some older 5700/5701 bootcode reports.
13180                          */
13181                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13182                             ASIC_REV_5700 ||
13183                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13184                             ASIC_REV_5701)
13185                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13186
13187                         break;
13188
13189                 case SHASTA_EXT_LED_SHARED:
13190                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13191                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13192                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13193                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13194                                                  LED_CTRL_MODE_PHY_2);
13195                         break;
13196
13197                 case SHASTA_EXT_LED_MAC:
13198                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13199                         break;
13200
13201                 case SHASTA_EXT_LED_COMBO:
13202                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13203                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13204                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13205                                                  LED_CTRL_MODE_PHY_2);
13206                         break;
13207
13208                 }
13209
13210                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13211                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13212                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13213                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13214
13215                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13216                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13217
13218                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13219                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13220                         if ((tp->pdev->subsystem_vendor ==
13221                              PCI_VENDOR_ID_ARIMA) &&
13222                             (tp->pdev->subsystem_device == 0x205a ||
13223                              tp->pdev->subsystem_device == 0x2063))
13224                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13225                 } else {
13226                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13227                         tg3_flag_set(tp, IS_NIC);
13228                 }
13229
13230                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13231                         tg3_flag_set(tp, ENABLE_ASF);
13232                         if (tg3_flag(tp, 5750_PLUS))
13233                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13234                 }
13235
13236                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13237                     tg3_flag(tp, 5750_PLUS))
13238                         tg3_flag_set(tp, ENABLE_APE);
13239
13240                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13241                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13242                         tg3_flag_clear(tp, WOL_CAP);
13243
13244                 if (tg3_flag(tp, WOL_CAP) &&
13245                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13246                         tg3_flag_set(tp, WOL_ENABLE);
13247                         device_set_wakeup_enable(&tp->pdev->dev, true);
13248                 }
13249
13250                 if (cfg2 & (1 << 17))
13251                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13252
13253                 /* SerDes signal pre-emphasis in register 0x590 is set by
13254                  * the bootcode if bit 18 is set. */
13255                 if (cfg2 & (1 << 18))
13256                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13257
13258                 if ((tg3_flag(tp, 57765_PLUS) ||
13259                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13260                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13261                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13262                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13263
13264                 if (tg3_flag(tp, PCI_EXPRESS) &&
13265                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13266                     !tg3_flag(tp, 57765_PLUS)) {
13267                         u32 cfg3;
13268
13269                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13270                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13271                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13272                 }
13273
13274                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13275                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13276                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13277                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13278                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13279                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13280         }
13281 done:
13282         if (tg3_flag(tp, WOL_CAP))
13283                 device_set_wakeup_enable(&tp->pdev->dev,
13284                                          tg3_flag(tp, WOL_ENABLE));
13285         else
13286                 device_set_wakeup_capable(&tp->pdev->dev, false);
13287 }
13288
13289 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13290 {
13291         int i;
13292         u32 val;
13293
13294         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13295         tw32(OTP_CTRL, cmd);
13296
13297         /* Wait for up to 1 ms for command to execute. */
13298         for (i = 0; i < 100; i++) {
13299                 val = tr32(OTP_STATUS);
13300                 if (val & OTP_STATUS_CMD_DONE)
13301                         break;
13302                 udelay(10);
13303         }
13304
13305         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13306 }
13307
13308 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13309  * configuration is a 32-bit value that straddles the alignment boundary.
13310  * We do two 32-bit reads and then shift and merge the results.
13311  */
13312 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13313 {
13314         u32 bhalf_otp, thalf_otp;
13315
13316         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13317
13318         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13319                 return 0;
13320
13321         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13322
13323         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13324                 return 0;
13325
13326         thalf_otp = tr32(OTP_READ_DATA);
13327
13328         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13329
13330         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13331                 return 0;
13332
13333         bhalf_otp = tr32(OTP_READ_DATA);
13334
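        /* Keep the low half of the first word as the high half of the
         * result, and the high half of the second word as the low half.
         */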
13335         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13336 }
13337
13338 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13339 {
13340         u32 adv = ADVERTISED_Autoneg;
13341
13342         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13343                 adv |= ADVERTISED_1000baseT_Half |
13344                        ADVERTISED_1000baseT_Full;
13345
13346         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13347                 adv |= ADVERTISED_100baseT_Half |
13348                        ADVERTISED_100baseT_Full |
13349                        ADVERTISED_10baseT_Half |
13350                        ADVERTISED_10baseT_Full |
13351                        ADVERTISED_TP;
13352         else
13353                 adv |= ADVERTISED_FIBRE;
13354
13355         tp->link_config.advertising = adv;
13356         tp->link_config.speed = SPEED_UNKNOWN;
13357         tp->link_config.duplex = DUPLEX_UNKNOWN;
13358         tp->link_config.autoneg = AUTONEG_ENABLE;
13359         tp->link_config.active_speed = SPEED_UNKNOWN;
13360         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13361
13362         tp->old_link = -1;
13363 }
13364
13365 static int __devinit tg3_phy_probe(struct tg3 *tp)
13366 {
13367         u32 hw_phy_id_1, hw_phy_id_2;
13368         u32 hw_phy_id, hw_phy_id_masked;
13369         int err;
13370
13371         /* flow control autonegotiation is default behavior */
13372         tg3_flag_set(tp, PAUSE_AUTONEG);
13373         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13374
13375         if (tg3_flag(tp, USE_PHYLIB))
13376                 return tg3_phy_init(tp);
13377
13378         /* Reading the PHY ID register can conflict with ASF
13379          * firmware access to the PHY hardware.
13380          */
13381         err = 0;
13382         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13383                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13384         } else {
13385                 /* Now read the physical PHY_ID from the chip and verify
13386                  * that it is sane.  If it doesn't look good, we fall back
13387                  * to the PHY ID found in the eeprom area and, failing
13388                  * that, the hard-coded subsys device table.
13389                  */
13390                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13391                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13392
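                /* Fold the two MII ID registers into the driver's internal
                 * PHY ID layout (the same packing used for the eeprom PHY ID
                 * in tg3_get_eeprom_hw_cfg()).
                 */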
13393                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13394                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13395                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13396
13397                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13398         }
13399
13400         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13401                 tp->phy_id = hw_phy_id;
13402                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13403                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13404                 else
13405                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13406         } else {
13407                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13408                         /* Do nothing, phy ID already set up in
13409                          * tg3_get_eeprom_hw_cfg().
13410                          */
13411                 } else {
13412                         struct subsys_tbl_ent *p;
13413
13414                         /* No eeprom signature?  Try the hardcoded
13415                          * subsys device table.
13416                          */
13417                         p = tg3_lookup_by_subsys(tp);
13418                         if (!p)
13419                                 return -ENODEV;
13420
13421                         tp->phy_id = p->phy_id;
13422                         if (!tp->phy_id ||
13423                             tp->phy_id == TG3_PHY_ID_BCM8002)
13424                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13425                 }
13426         }
13427
13428         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13429             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13430              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13431              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13432               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13433              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13434               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13435                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13436
13437         tg3_phy_init_link_config(tp);
13438
13439         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13440             !tg3_flag(tp, ENABLE_APE) &&
13441             !tg3_flag(tp, ENABLE_ASF)) {
13442                 u32 bmsr, dummy;
13443
13444                 tg3_readphy(tp, MII_BMSR, &bmsr);
13445                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13446                     (bmsr & BMSR_LSTATUS))
13447                         goto skip_phy_reset;
13448
13449                 err = tg3_phy_reset(tp);
13450                 if (err)
13451                         return err;
13452
13453                 tg3_phy_set_wirespeed(tp);
13454
13455                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13456                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13457                                             tp->link_config.flowctrl);
13458
13459                         tg3_writephy(tp, MII_BMCR,
13460                                      BMCR_ANENABLE | BMCR_ANRESTART);
13461                 }
13462         }
13463
13464 skip_phy_reset:
13465         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
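                /* Note: tg3_init_5401phy_dsp() is intentionally called twice
                 * below; this is not a copy-and-paste error.
                 */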
13466                 err = tg3_init_5401phy_dsp(tp);
13467                 if (err)
13468                         return err;
13469
13470                 err = tg3_init_5401phy_dsp(tp);
13471         }
13472
13473         return err;
13474 }
13475
13476 static void __devinit tg3_read_vpd(struct tg3 *tp)
13477 {
13478         u8 *vpd_data;
13479         unsigned int block_end, rosize, len;
13480         u32 vpdlen;
13481         int j, i = 0;
13482
13483         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13484         if (!vpd_data)
13485                 goto out_no_vpd;
13486
13487         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13488         if (i < 0)
13489                 goto out_not_found;
13490
13491         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13492         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13493         i += PCI_VPD_LRDT_TAG_SIZE;
13494
13495         if (block_end > vpdlen)
13496                 goto out_not_found;
13497
13498         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13499                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13500         if (j > 0) {
13501                 len = pci_vpd_info_field_size(&vpd_data[j]);
13502
13503                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
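                /* "1028" is Dell's PCI vendor ID rendered in ASCII; the
                 * VENDOR0 firmware version is only read for such boards.
                 */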
13504                 if (j + len > block_end || len != 4 ||
13505                     memcmp(&vpd_data[j], "1028", 4))
13506                         goto partno;
13507
13508                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13509                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13510                 if (j < 0)
13511                         goto partno;
13512
13513                 len = pci_vpd_info_field_size(&vpd_data[j]);
13514
13515                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13516                 if (j + len > block_end)
13517                         goto partno;
13518
13519                 /* Bound the copy by the destination buffer, not the VPD length. */
13520                 snprintf(tp->fw_ver, TG3_VER_SIZE, "%.*s bc ", len, &vpd_data[j]);
13521         }
13522
13523 partno:
13524         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13525                                       PCI_VPD_RO_KEYWORD_PARTNO);
13526         if (i < 0)
13527                 goto out_not_found;
13528
13529         len = pci_vpd_info_field_size(&vpd_data[i]);
13530
13531         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13532         if (len > TG3_BPN_SIZE ||
13533             (len + i) > vpdlen)
13534                 goto out_not_found;
13535
13536         memcpy(tp->board_part_number, &vpd_data[i], len);
13537
13538 out_not_found:
13539         kfree(vpd_data);
13540         if (tp->board_part_number[0])
13541                 return;
13542
13543 out_no_vpd:
13544         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13545                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13546                         strcpy(tp->board_part_number, "BCM5717");
13547                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13548                         strcpy(tp->board_part_number, "BCM5718");
13549                 else
13550                         goto nomatch;
13551         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13552                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13553                         strcpy(tp->board_part_number, "BCM57780");
13554                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13555                         strcpy(tp->board_part_number, "BCM57760");
13556                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13557                         strcpy(tp->board_part_number, "BCM57790");
13558                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13559                         strcpy(tp->board_part_number, "BCM57788");
13560                 else
13561                         goto nomatch;
13562         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13563                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13564                         strcpy(tp->board_part_number, "BCM57761");
13565                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13566                         strcpy(tp->board_part_number, "BCM57765");
13567                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13568                         strcpy(tp->board_part_number, "BCM57781");
13569                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13570                         strcpy(tp->board_part_number, "BCM57785");
13571                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13572                         strcpy(tp->board_part_number, "BCM57791");
13573                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13574                         strcpy(tp->board_part_number, "BCM57795");
13575                 else
13576                         goto nomatch;
13577         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13578                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13579                         strcpy(tp->board_part_number, "BCM57762");
13580                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13581                         strcpy(tp->board_part_number, "BCM57766");
13582                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13583                         strcpy(tp->board_part_number, "BCM57782");
13584                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13585                         strcpy(tp->board_part_number, "BCM57786");
13586                 else
13587                         goto nomatch;
13588         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13589                 strcpy(tp->board_part_number, "BCM95906");
13590         } else {
13591 nomatch:
13592                 strcpy(tp->board_part_number, "none");
13593         }
13594 }
13595
13596 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13597 {
13598         u32 val;
13599
13600         if (tg3_nvram_read(tp, offset, &val) ||
13601             (val & 0xfc000000) != 0x0c000000 ||
13602             tg3_nvram_read(tp, offset + 4, &val) ||
13603             val != 0)
13604                 return 0;
13605
13606         return 1;
13607 }
13608
13609 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13610 {
13611         u32 val, offset, start, ver_offset;
13612         int i, dst_off;
13613         bool newver = false;
13614
13615         if (tg3_nvram_read(tp, 0xc, &offset) ||
13616             tg3_nvram_read(tp, 0x4, &start))
13617                 return;
13618
13619         offset = tg3_nvram_logical_addr(tp, offset);
13620
13621         if (tg3_nvram_read(tp, offset, &val))
13622                 return;
13623
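        /* A word with 0x0c000000 in its top bits followed by a zero word is
         * the same signature tg3_fw_img_is_valid() checks; it evidently
         * marks the newer image format with an embedded version string.
         */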
13624         if ((val & 0xfc000000) == 0x0c000000) {
13625                 if (tg3_nvram_read(tp, offset + 4, &val))
13626                         return;
13627
13628                 if (val == 0)
13629                         newver = true;
13630         }
13631
13632         dst_off = strlen(tp->fw_ver);
13633
13634         if (newver) {
13635                 if (TG3_VER_SIZE - dst_off < 16 ||
13636                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13637                         return;
13638
13639                 offset = offset + ver_offset - start;
13640                 for (i = 0; i < 16; i += 4) {
13641                         __be32 v;
13642                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13643                                 return;
13644
13645                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13646                 }
13647         } else {
13648                 u32 major, minor;
13649
13650                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13651                         return;
13652
13653                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13654                         TG3_NVM_BCVER_MAJSFT;
13655                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13656                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13657                          "v%d.%02d", major, minor);
13658         }
13659 }
13660
13661 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13662 {
13663         u32 val, major, minor;
13664
13665         /* Use native endian representation */
13666         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13667                 return;
13668
13669         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13670                 TG3_NVM_HWSB_CFG1_MAJSFT;
13671         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13672                 TG3_NVM_HWSB_CFG1_MINSFT;
13673
13674         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13675 }
13676
13677 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13678 {
13679         u32 offset, major, minor, build;
13680
13681         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13682
13683         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13684                 return;
13685
13686         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13687         case TG3_EEPROM_SB_REVISION_0:
13688                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13689                 break;
13690         case TG3_EEPROM_SB_REVISION_2:
13691                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13692                 break;
13693         case TG3_EEPROM_SB_REVISION_3:
13694                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13695                 break;
13696         case TG3_EEPROM_SB_REVISION_4:
13697                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13698                 break;
13699         case TG3_EEPROM_SB_REVISION_5:
13700                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13701                 break;
13702         case TG3_EEPROM_SB_REVISION_6:
13703                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13704                 break;
13705         default:
13706                 return;
13707         }
13708
13709         if (tg3_nvram_read(tp, offset, &val))
13710                 return;
13711
13712         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13713                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13714         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13715                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13716         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13717
13718         if (minor > 99 || build > 26)
13719                 return;
13720
13721         offset = strlen(tp->fw_ver);
13722         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13723                  " v%d.%02d", major, minor);
13724
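        /* Builds 1-26 become a trailing 'a'-'z' suffix, hence the
         * build > 26 rejection above.
         */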
13725         if (build > 0) {
13726                 offset = strlen(tp->fw_ver);
13727                 if (offset < TG3_VER_SIZE - 1)
13728                         tp->fw_ver[offset] = 'a' + build - 1;
13729         }
13730 }
13731
13732 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13733 {
13734         u32 val, offset, start;
13735         int i, vlen;
13736
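        /* Walk the NVRAM directory looking for the ASF init-code entry; the
         * entry type lives in the high bits of each directory word.
         */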
13737         for (offset = TG3_NVM_DIR_START;
13738              offset < TG3_NVM_DIR_END;
13739              offset += TG3_NVM_DIRENT_SIZE) {
13740                 if (tg3_nvram_read(tp, offset, &val))
13741                         return;
13742
13743                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13744                         break;
13745         }
13746
13747         if (offset == TG3_NVM_DIR_END)
13748                 return;
13749
13750         if (!tg3_flag(tp, 5705_PLUS))
13751                 start = 0x08000000;
13752         else if (tg3_nvram_read(tp, offset - 4, &start))
13753                 return;
13754
13755         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13756             !tg3_fw_img_is_valid(tp, offset) ||
13757             tg3_nvram_read(tp, offset + 8, &val))
13758                 return;
13759
13760         offset += val - start;
13761
13762         vlen = strlen(tp->fw_ver);
13763
13764         tp->fw_ver[vlen++] = ',';
13765         tp->fw_ver[vlen++] = ' ';
13766
13767         for (i = 0; i < 4; i++) {
13768                 __be32 v;
13769                 if (tg3_nvram_read_be32(tp, offset, &v))
13770                         return;
13771
13772                 offset += sizeof(v);
13773
13774                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13775                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13776                         break;
13777                 }
13778
13779                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13780                 vlen += sizeof(v);
13781         }
13782 }
13783
13784 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13785 {
13786         int vlen;
13787         u32 apedata;
13788         char *fwtype;
13789
13790         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13791                 return;
13792
13793         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13794         if (apedata != APE_SEG_SIG_MAGIC)
13795                 return;
13796
13797         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13798         if (!(apedata & APE_FW_STATUS_READY))
13799                 return;
13800
13801         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13802
13803         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13804                 tg3_flag_set(tp, APE_HAS_NCSI);
13805                 fwtype = "NCSI";
13806         } else {
13807                 fwtype = "DASH";
13808         }
13809
13810         vlen = strlen(tp->fw_ver);
13811
13812         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13813                  fwtype,
13814                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13815                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13816                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13817                  (apedata & APE_FW_VERSION_BLDMSK));
13818 }
13819
13820 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13821 {
13822         u32 val;
13823         bool vpd_vers = false;
13824
13825         if (tp->fw_ver[0] != 0)
13826                 vpd_vers = true;
13827
13828         if (tg3_flag(tp, NO_NVRAM)) {
13829                 strcat(tp->fw_ver, "sb");
13830                 return;
13831         }
13832
13833         if (tg3_nvram_read(tp, 0, &val))
13834                 return;
13835
13836         if (val == TG3_EEPROM_MAGIC)
13837                 tg3_read_bc_ver(tp);
13838         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13839                 tg3_read_sb_ver(tp, val);
13840         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13841                 tg3_read_hwsb_ver(tp);
13842         else
13843                 return;
13844
13845         if (vpd_vers)
13846                 goto done;
13847
13848         if (tg3_flag(tp, ENABLE_APE)) {
13849                 if (tg3_flag(tp, ENABLE_ASF))
13850                         tg3_read_dash_ver(tp);
13851         } else if (tg3_flag(tp, ENABLE_ASF)) {
13852                 tg3_read_mgmtfw_ver(tp);
13853         }
13854
13855 done:
13856         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13857 }
13858
13859 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13860 {
13861         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13862                 return TG3_RX_RET_MAX_SIZE_5717;
13863         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13864                 return TG3_RX_RET_MAX_SIZE_5700;
13865         else
13866                 return TG3_RX_RET_MAX_SIZE_5705;
13867 }
13868
13869 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13870         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13871         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13872         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13873         { },
13874 };
13875
13876 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13877 {
13878         struct pci_dev *peer;
13879         unsigned int func, devnr = tp->pdev->devfn & ~7;
13880
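        /* devfn packs the slot number in its upper bits and the function
         * number in the low three bits; masking with ~7 yields function 0
         * of this slot so all eight sibling functions can be scanned.
         */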
13881         for (func = 0; func < 8; func++) {
13882                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13883                 if (peer && peer != tp->pdev)
13884                         break;
13885                 pci_dev_put(peer);
13886         }
13887         /* The 5704 can be configured in single-port mode; set peer to
13888          * tp->pdev in that case.
13889          */
13890         if (!peer) {
13891                 peer = tp->pdev;
13892                 return peer;
13893         }
13894
13895         /*
13896          * We don't need to keep the refcount elevated; there's no way
13897          * to remove one half of this device without removing the other.
13898          */
13899         pci_dev_put(peer);
13900
13901         return peer;
13902 }
13903
13904 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13905 {
13906         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13907         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13908                 u32 reg;
13909
13910                 /* All devices that use the alternate
13911                  * ASIC REV location have a CPMU.
13912                  */
13913                 tg3_flag_set(tp, CPMU_PRESENT);
13914
13915                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13916                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13917                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13918                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13919                         reg = TG3PCI_GEN2_PRODID_ASICREV;
13920                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13921                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13922                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13923                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13924                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13925                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13926                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13927                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13928                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13929                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13930                         reg = TG3PCI_GEN15_PRODID_ASICREV;
13931                 else
13932                         reg = TG3PCI_PRODID_ASICREV;
13933
13934                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
13935         }
13936
13937         /* Wrong chip ID in 5752 A0. This code can be removed later
13938          * as A0 is not in production.
13939          */
13940         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13941                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13942
13943         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13944             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13945             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13946                 tg3_flag_set(tp, 5717_PLUS);
13947
13948         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13949             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13950                 tg3_flag_set(tp, 57765_CLASS);
13951
13952         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
13953                 tg3_flag_set(tp, 57765_PLUS);
13954
13955         /* Intentionally exclude ASIC_REV_5906 */
13956         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13957             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13958             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13959             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13960             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13961             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13962             tg3_flag(tp, 57765_PLUS))
13963                 tg3_flag_set(tp, 5755_PLUS);
13964
13965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13966             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13967                 tg3_flag_set(tp, 5780_CLASS);
13968
13969         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13970             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13971             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13972             tg3_flag(tp, 5755_PLUS) ||
13973             tg3_flag(tp, 5780_CLASS))
13974                 tg3_flag_set(tp, 5750_PLUS);
13975
13976         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13977             tg3_flag(tp, 5750_PLUS))
13978                 tg3_flag_set(tp, 5705_PLUS);
13979 }
13980
13981 static int __devinit tg3_get_invariants(struct tg3 *tp)
13982 {
13983         u32 misc_ctrl_reg;
13984         u32 pci_state_reg, grc_misc_cfg;
13985         u32 val;
13986         u16 pci_cmd;
13987         int err;
13988
13989         /* Force memory write invalidate off.  If we leave it on,
13990          * then on 5700_BX chips we have to enable a workaround.
13991          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13992          * to match the cacheline size.  The Broadcom driver has this
13993          * workaround but turns MWI off all the time, so it never uses
13994          * it.  This seems to suggest that the workaround is insufficient.
13995          */
13996         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13997         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13998         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13999
14000         /* Important! -- Make sure register accesses are byteswapped
14001          * correctly.  Also, for those chips that require it, make
14002          * sure that indirect register accesses are enabled before
14003          * the first operation.
14004          */
14005         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14006                               &misc_ctrl_reg);
14007         tp->misc_host_ctrl |= (misc_ctrl_reg &
14008                                MISC_HOST_CTRL_CHIPREV);
14009         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14010                                tp->misc_host_ctrl);
14011
14012         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14013
14014         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14015          * we need to disable memory accesses and use configuration
14016          * cycles only to access all registers.  The 5702/03 chips
14017          * can mistakenly decode the special cycles from the
14018          * ICH chipsets as memory write cycles, causing corruption
14019          * of register and memory space. Only certain ICH bridges
14020          * will drive special cycles with non-zero data during the
14021          * address phase which can fall within the 5703's address
14022          * range. This is not an ICH bug as the PCI spec allows
14023          * non-zero address during special cycles. However, only
14024          * these ICH bridges are known to drive non-zero addresses
14025          * during special cycles.
14026          *
14027          * Since special cycles do not cross PCI bridges, we only
14028          * enable this workaround if the 5703 is on the secondary
14029          * bus of these ICH bridges.
14030          */
14031         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14032             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14033                 static struct tg3_dev_id {
14034                         u32     vendor;
14035                         u32     device;
14036                         u32     rev;
14037                 } ich_chipsets[] = {
14038                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14039                           PCI_ANY_ID },
14040                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14041                           PCI_ANY_ID },
14042                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14043                           0xa },
14044                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14045                           PCI_ANY_ID },
14046                         { },
14047                 };
14048                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14049                 struct pci_dev *bridge = NULL;
14050
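                /* pci_get_device() resumes the search after 'bridge' and
                 * manages its refcount, so this loop visits every matching
                 * ICH bridge in the system.
                 */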
14051                 while (pci_id->vendor != 0) {
14052                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14053                                                 bridge);
14054                         if (!bridge) {
14055                                 pci_id++;
14056                                 continue;
14057                         }
14058                         if (pci_id->rev != PCI_ANY_ID) {
14059                                 if (bridge->revision > pci_id->rev)
14060                                         continue;
14061                         }
14062                         if (bridge->subordinate &&
14063                             (bridge->subordinate->number ==
14064                              tp->pdev->bus->number)) {
14065                                 tg3_flag_set(tp, ICH_WORKAROUND);
14066                                 pci_dev_put(bridge);
14067                                 break;
14068                         }
14069                 }
14070         }
14071
14072         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14073                 static struct tg3_dev_id {
14074                         u32     vendor;
14075                         u32     device;
14076                 } bridge_chipsets[] = {
14077                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14078                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14079                         { },
14080                 };
14081                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14082                 struct pci_dev *bridge = NULL;
14083
14084                 while (pci_id->vendor != 0) {
14085                         bridge = pci_get_device(pci_id->vendor,
14086                                                 pci_id->device,
14087                                                 bridge);
14088                         if (!bridge) {
14089                                 pci_id++;
14090                                 continue;
14091                         }
14092                         if (bridge->subordinate &&
14093                             (bridge->subordinate->number <=
14094                              tp->pdev->bus->number) &&
14095                             (bridge->subordinate->subordinate >=
14096                              tp->pdev->bus->number)) {
14097                                 tg3_flag_set(tp, 5701_DMA_BUG);
14098                                 pci_dev_put(bridge);
14099                                 break;
14100                         }
14101                 }
14102         }
14103
14104         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14105          * DMA addresses > 40-bit.  This bridge may have additional
14106          * 57xx devices behind it, in some 4-port NIC designs for example.
14107          * Any tg3 device found behind the bridge will also need the 40-bit
14108          * DMA workaround.
14109          */
14110         if (tg3_flag(tp, 5780_CLASS)) {
14111                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14112                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14113         } else {
14114                 struct pci_dev *bridge = NULL;
14115
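                /* A device sits behind the bridge when its bus number falls
                 * within the bridge's [secondary, subordinate] bus range,
                 * which is what the check below tests.
                 */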
14116                 do {
14117                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14118                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14119                                                 bridge);
14120                         if (bridge && bridge->subordinate &&
14121                             (bridge->subordinate->number <=
14122                              tp->pdev->bus->number) &&
14123                             (bridge->subordinate->subordinate >=
14124                              tp->pdev->bus->number)) {
14125                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14126                                 pci_dev_put(bridge);
14127                                 break;
14128                         }
14129                 } while (bridge);
14130         }
14131
14132         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14133             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14134                 tp->pdev_peer = tg3_find_peer(tp);
14135
14136         /* Determine TSO capabilities */
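        /* HW_TSO_1/2/3 denote successive generations of on-chip TSO
         * engines; chips without one fall back to firmware TSO, which
         * requires a loadable image (tp->fw_needed).
         */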
14137         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14138                 ; /* Do nothing. HW bug. */
14139         else if (tg3_flag(tp, 57765_PLUS))
14140                 tg3_flag_set(tp, HW_TSO_3);
14141         else if (tg3_flag(tp, 5755_PLUS) ||
14142                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14143                 tg3_flag_set(tp, HW_TSO_2);
14144         else if (tg3_flag(tp, 5750_PLUS)) {
14145                 tg3_flag_set(tp, HW_TSO_1);
14146                 tg3_flag_set(tp, TSO_BUG);
14147                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14148                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14149                         tg3_flag_clear(tp, TSO_BUG);
14150         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14151                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14152                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14153                 tg3_flag_set(tp, TSO_BUG);
14154                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14155                         tp->fw_needed = FIRMWARE_TG3TSO5;
14156                 else
14157                         tp->fw_needed = FIRMWARE_TG3TSO;
14158         }
14159
14160         /* Selectively allow TSO based on operating conditions */
14161         if (tg3_flag(tp, HW_TSO_1) ||
14162             tg3_flag(tp, HW_TSO_2) ||
14163             tg3_flag(tp, HW_TSO_3) ||
14164             tp->fw_needed) {
14165                 /* For firmware TSO, assume ASF is disabled.
14166                  * We'll disable TSO later if we discover ASF
14167                  * is enabled in tg3_get_eeprom_hw_cfg().
14168                  */
14169                 tg3_flag_set(tp, TSO_CAPABLE);
14170         } else {
14171                 tg3_flag_clear(tp, TSO_CAPABLE);
14172                 tg3_flag_clear(tp, TSO_BUG);
14173                 tp->fw_needed = NULL;
14174         }
14175
14176         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14177                 tp->fw_needed = FIRMWARE_TG3;
14178
14179         tp->irq_max = 1;
14180
14181         if (tg3_flag(tp, 5750_PLUS)) {
14182                 tg3_flag_set(tp, SUPPORT_MSI);
14183                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14184                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14185                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14186                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14187                      tp->pdev_peer == tp->pdev))
14188                         tg3_flag_clear(tp, SUPPORT_MSI);
14189
14190                 if (tg3_flag(tp, 5755_PLUS) ||
14191                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14192                         tg3_flag_set(tp, 1SHOT_MSI);
14193                 }
14194
14195                 if (tg3_flag(tp, 57765_PLUS)) {
14196                         tg3_flag_set(tp, SUPPORT_MSIX);
14197                         tp->irq_max = TG3_IRQ_MAX_VECS;
14198                         tg3_rss_init_dflt_indir_tbl(tp);
14199                 }
14200         }
14201
14202         if (tg3_flag(tp, 5755_PLUS))
14203                 tg3_flag_set(tp, SHORT_DMA_BUG);
14204
14205         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14206                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14207
14208         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14209             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14210             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14211                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14212
14213         if (tg3_flag(tp, 57765_PLUS) &&
14214             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14215                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14216
14217         if (!tg3_flag(tp, 5705_PLUS) ||
14218             tg3_flag(tp, 5780_CLASS) ||
14219             tg3_flag(tp, USE_JUMBO_BDFLAG))
14220                 tg3_flag_set(tp, JUMBO_CAPABLE);
14221
14222         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14223                               &pci_state_reg);
14224
14225         if (pci_is_pcie(tp->pdev)) {
14226                 u16 lnkctl;
14227
14228                 tg3_flag_set(tp, PCI_EXPRESS);
14229
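                /* 5719 A0 workaround: cap the PCIe maximum read request
                 * size at 2048 bytes.
                 */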
14230                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14231                         int readrq = pcie_get_readrq(tp->pdev);
14232                         if (readrq > 2048)
14233                                 pcie_set_readrq(tp->pdev, 2048);
14234                 }
14235
14236                 pci_read_config_word(tp->pdev,
14237                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14238                                      &lnkctl);
14239                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14240                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14241                             ASIC_REV_5906) {
14242                                 tg3_flag_clear(tp, HW_TSO_2);
14243                                 tg3_flag_clear(tp, TSO_CAPABLE);
14244                         }
14245                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14246                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14247                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14248                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14249                                 tg3_flag_set(tp, CLKREQ_BUG);
14250                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14251                         tg3_flag_set(tp, L1PLLPD_EN);
14252                 }
14253         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14254                 /* BCM5785 devices are effectively PCIe devices, and should
14255                  * follow PCIe codepaths, but do not have a PCIe capabilities
14256                  * section.
14257                  */
14258                 tg3_flag_set(tp, PCI_EXPRESS);
14259         } else if (!tg3_flag(tp, 5705_PLUS) ||
14260                    tg3_flag(tp, 5780_CLASS)) {
14261                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14262                 if (!tp->pcix_cap) {
14263                         dev_err(&tp->pdev->dev,
14264                                 "Cannot find PCI-X capability, aborting\n");
14265                         return -EIO;
14266                 }
14267
14268                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14269                         tg3_flag_set(tp, PCIX_MODE);
14270         }
14271
14272         /* If we have an AMD 762 or VIA K8T800 chipset, write
14273          * reordering to the mailbox registers done by the host
14274          * controller can cause major trouble.  We read back from
14275          * every mailbox register write to force the writes to be
14276          * posted to the chip in order.
14277          */
14278         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14279             !tg3_flag(tp, PCI_EXPRESS))
14280                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14281
14282         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14283                              &tp->pci_cacheline_sz);
14284         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14285                              &tp->pci_lat_timer);
14286         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14287             tp->pci_lat_timer < 64) {
14288                 tp->pci_lat_timer = 64;
14289                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14290                                       tp->pci_lat_timer);
14291         }
14292
14293         /* Important! -- It is critical that the PCI-X hw workaround
14294          * situation is decided before the first MMIO register access.
14295          */
14296         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14297                 /* 5700 BX chips need to have their TX producer index
14298                  * mailboxes written twice to workaround a bug.
14299                  */
14300                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14301
14302                 /* If we are in PCI-X mode, enable register write workaround.
14303                  *
14304                  * The workaround is to use indirect register accesses
14305                  * for all chip writes not to mailbox registers.
14306                  */
14307                 if (tg3_flag(tp, PCIX_MODE)) {
14308                         u32 pm_reg;
14309
14310                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14311
14312                         /* The chip can have its power management PCI config
14313                          * space registers clobbered due to this bug.
14314                          * So explicitly force the chip into D0 here.
14315                          */
14316                         pci_read_config_dword(tp->pdev,
14317                                               tp->pm_cap + PCI_PM_CTRL,
14318                                               &pm_reg);
14319                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14320                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14321                         pci_write_config_dword(tp->pdev,
14322                                                tp->pm_cap + PCI_PM_CTRL,
14323                                                pm_reg);
14324
14325                         /* Also, force SERR#/PERR# in PCI command. */
14326                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14327                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14328                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14329                 }
14330         }
14331
14332         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14333                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14334         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14335                 tg3_flag_set(tp, PCI_32BIT);
14336
14337         /* Chip-specific fixup from Broadcom driver */
14338         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14339             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14340                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14341                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14342         }
14343
14344         /* Default fast path register access methods */
14345         tp->read32 = tg3_read32;
14346         tp->write32 = tg3_write32;
14347         tp->read32_mbox = tg3_read32;
14348         tp->write32_mbox = tg3_write32;
14349         tp->write32_tx_mbox = tg3_write32;
14350         tp->write32_rx_mbox = tg3_write32;
14351
14352         /* Various workaround register access methods */
14353         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14354                 tp->write32 = tg3_write_indirect_reg32;
14355         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14356                  (tg3_flag(tp, PCI_EXPRESS) &&
14357                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14358                 /*
14359                  * Back to back register writes can cause problems on these
14360                  * chips, the workaround is to read back all reg writes
14361                  * except those to mailbox regs.
14362                  *
14363                  * See tg3_write_indirect_reg32().
14364                  */
14365                 tp->write32 = tg3_write_flush_reg32;
14366         }
14367
14368         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14369                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14370                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14371                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14372         }
14373
14374         if (tg3_flag(tp, ICH_WORKAROUND)) {
14375                 tp->read32 = tg3_read_indirect_reg32;
14376                 tp->write32 = tg3_write_indirect_reg32;
14377                 tp->read32_mbox = tg3_read_indirect_mbox;
14378                 tp->write32_mbox = tg3_write_indirect_mbox;
14379                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14380                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14381
14382                 iounmap(tp->regs);
14383                 tp->regs = NULL;
14384
14385                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14386                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14387                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14388         }
14389         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14390                 tp->read32_mbox = tg3_read32_mbox_5906;
14391                 tp->write32_mbox = tg3_write32_mbox_5906;
14392                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14393                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14394         }
14395
14396         if (tp->write32 == tg3_write_indirect_reg32 ||
14397             (tg3_flag(tp, PCIX_MODE) &&
14398              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14399               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14400                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14401
14402         /* The memory arbiter has to be enabled in order for SRAM accesses
14403          * to succeed.  Normally on powerup the tg3 chip firmware will make
14404          * sure it is enabled, but other entities such as system netboot
14405          * code might disable it.
14406          */
14407         val = tr32(MEMARB_MODE);
14408         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14409
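        /* Start with the function number from devfn; the multi-port chips
         * below override it from PCI-X status or from the CPMU status
         * word in NIC SRAM, where the true function mapping is reported.
         */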
14410         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14411         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14412             tg3_flag(tp, 5780_CLASS)) {
14413                 if (tg3_flag(tp, PCIX_MODE)) {
14414                         pci_read_config_dword(tp->pdev,
14415                                               tp->pcix_cap + PCI_X_STATUS,
14416                                               &val);
14417                         tp->pci_fn = val & 0x7;
14418                 }
14419         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14420                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14421                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14422                     NIC_SRAM_CPMUSTAT_SIG) {
14423                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14424                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14425                 }
14426         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14427                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14428                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14429                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14430                     NIC_SRAM_CPMUSTAT_SIG) {
14431                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14432                                      TG3_CPMU_STATUS_FSHFT_5719;
14433                 }
14434         }
14435
14436         /* Get eeprom hw config before calling tg3_set_power_state().
14437          * In particular, the TG3_FLAG_IS_NIC flag must be
14438          * determined before calling tg3_set_power_state() so that
14439          * we know whether or not to switch out of Vaux power.
14440          * When the flag is set, it means that GPIO1 is used for eeprom
14441          * write protect and also implies that it is a LOM where GPIOs
14442          * are not used to switch power.
14443          */
14444         tg3_get_eeprom_hw_cfg(tp);
14445
14446         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14447                 tg3_flag_clear(tp, TSO_CAPABLE);
14448                 tg3_flag_clear(tp, TSO_BUG);
14449                 tp->fw_needed = NULL;
14450         }
14451
14452         if (tg3_flag(tp, ENABLE_APE)) {
14453                 /* Allow reads and writes to the
14454                  * APE register and memory space.
14455                  */
14456                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14457                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14458                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14459                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14460                                        pci_state_reg);
14461
14462                 tg3_ape_lock_init(tp);
14463         }
14464
14465         /* Set up tp->grc_local_ctrl before calling
14466          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14467          * will bring 5700's external PHY out of reset.
14468          * It is also used as eeprom write protect on LOMs.
14469          */
14470         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14471         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14472             tg3_flag(tp, EEPROM_WRITE_PROT))
14473                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14474                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14475         /* Unused GPIO3 must be driven as output on 5752 because there
14476          * are no pull-up resistors on unused GPIO pins.
14477          */
14478         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14479                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14480
14481         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14482             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14483             tg3_flag(tp, 57765_CLASS))
14484                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14485
14486         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14487             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14488                 /* Turn off the debug UART. */
14489                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14490                 if (tg3_flag(tp, IS_NIC))
14491                         /* Keep VMain power. */
14492                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14493                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14494         }
14495
14496         /* Switch out of Vaux if it is a NIC */
14497         tg3_pwrsrc_switch_to_vmain(tp);
14498
14499         /* Derive initial jumbo mode from MTU assigned in
14500          * ether_setup() via the alloc_etherdev() call
14501          * ether_setup() via the alloc_etherdev() call.
14502         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14503                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14504
14505         /* Determine WakeOnLan speed to use. */
14506         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14507             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14508             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14509             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14510                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14511         } else {
14512                 tg3_flag_set(tp, WOL_SPEED_100MB);
14513         }
14514
14515         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14516                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14517
14518         /* A few boards don't want the Ethernet@WireSpeed phy feature */
14519         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14520             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14521              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14522              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14523             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14524             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14525                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14526
14527         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14528             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14529                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14530         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14531                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14532
14533         if (tg3_flag(tp, 5705_PLUS) &&
14534             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14535             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14536             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14537             !tg3_flag(tp, 57765_PLUS)) {
14538                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14539                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14540                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14541                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14542                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14543                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14544                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14545                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14546                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14547                 } else
14548                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14549         }
14550
14551         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14552             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14553                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14554                 if (tp->phy_otp == 0)
14555                         tp->phy_otp = TG3_OTP_DEFAULT;
14556         }
14557
14558         if (tg3_flag(tp, CPMU_PRESENT))
14559                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14560         else
14561                 tp->mi_mode = MAC_MI_MODE_BASE;
14562
14563         tp->coalesce_mode = 0;
14564         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14565             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14566                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14567
14568         /* Set these bits to enable statistics workaround. */
14569         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14570             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14571             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14572                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14573                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14574         }
14575
14576         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14577             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14578                 tg3_flag_set(tp, USE_PHYLIB);
14579
14580         err = tg3_mdio_init(tp);
14581         if (err)
14582                 return err;
14583
14584         /* Initialize data/descriptor byte/word swapping. */
14585         val = tr32(GRC_MODE);
14586         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14587                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14588                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14589                         GRC_MODE_B2HRX_ENABLE |
14590                         GRC_MODE_HTX2B_ENABLE |
14591                         GRC_MODE_HOST_STACKUP);
14592         else
14593                 val &= GRC_MODE_HOST_STACKUP;
14594
14595         tw32(GRC_MODE, val | tp->grc_mode);
14596
14597         tg3_switch_clocks(tp);
14598
14599         /* Clear this out for sanity. */
14600         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14601
14602         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14603                               &pci_state_reg);
14604         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14605             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14606                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14607
14608                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14609                     chiprevid == CHIPREV_ID_5701_B0 ||
14610                     chiprevid == CHIPREV_ID_5701_B2 ||
14611                     chiprevid == CHIPREV_ID_5701_B5) {
14612                         void __iomem *sram_base;
14613
14614                         /* Write some dummy words into the SRAM status block
14615                          * area and see if they read back correctly.  If the
14616                          * readback is bad, force-enable the PCI-X workaround.
14617                          */
14618                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14619
14620                         writel(0x00000000, sram_base);
14621                         writel(0x00000000, sram_base + 4);
14622                         writel(0xffffffff, sram_base + 4);
14623                         if (readl(sram_base) != 0x00000000)
14624                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14625                 }
14626         }
14627
14628         udelay(50);
14629         tg3_nvram_init(tp);
14630
14631         grc_misc_cfg = tr32(GRC_MISC_CFG);
14632         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14633
14634         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14635             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14636              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14637                 tg3_flag_set(tp, IS_5788);
14638
14639         if (!tg3_flag(tp, IS_5788) &&
14640             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14641                 tg3_flag_set(tp, TAGGED_STATUS);
14642         if (tg3_flag(tp, TAGGED_STATUS)) {
14643                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14644                                       HOSTCC_MODE_CLRTICK_TXBD);
14645
14646                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14647                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14648                                        tp->misc_host_ctrl);
14649         }
14650
14651         /* Preserve the APE MAC_MODE bits */
14652         if (tg3_flag(tp, ENABLE_APE))
14653                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14654         else
14655                 tp->mac_mode = 0;
14656
14657         /* these are limited to 10/100 only */
14658         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14659              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14660             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14661              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14662              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14663               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14664               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14665             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14666              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14667               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14668               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14669             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14670             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14671             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14672             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14673                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14674
14675         err = tg3_phy_probe(tp);
14676         if (err) {
14677                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14678                 /* ... but do not return immediately ... */
14679                 tg3_mdio_fini(tp);
14680         }
14681
14682         tg3_read_vpd(tp);
14683         tg3_read_fw_ver(tp);
14684
14685         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14686                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14687         } else {
14688                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14689                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14690                 else
14691                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14692         }
14693
14694         /* 5700 {AX,BX} chips have a broken status block link
14695          * change bit implementation, so we must use the
14696          * status register in those cases.
14697          */
14698         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14699                 tg3_flag_set(tp, USE_LINKCHG_REG);
14700         else
14701                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14702
14703         /* The led_ctrl is set during tg3_phy_probe; here we might
14704          * have to force the link status polling mechanism based
14705          * upon subsystem IDs.
14706          */
14707         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14708             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14709             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14710                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14711                 tg3_flag_set(tp, USE_LINKCHG_REG);
14712         }
14713
14714         /* For all SERDES we poll the MAC status register. */
14715         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14716                 tg3_flag_set(tp, POLL_SERDES);
14717         else
14718                 tg3_flag_clear(tp, POLL_SERDES);
14719
14720         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14721         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14722         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14723             tg3_flag(tp, PCIX_MODE)) {
14724                 tp->rx_offset = NET_SKB_PAD;
14725 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14726                 tp->rx_copy_thresh = ~(u16)0;
14727 #endif
14728         }
14729
14730         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14731         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14732         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14733
14734         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14735
14736         /* Increment the rx prod index on the rx std ring by at most
14737          * 8 for these chips to work around hw errata.
14738          */
14739         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14740             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14741             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14742                 tp->rx_std_max_post = 8;
14743
14744         if (tg3_flag(tp, ASPM_WORKAROUND))
14745                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14746                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14747
14748         return err;
14749 }
14750
14751 #ifdef CONFIG_SPARC
14752 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14753 {
14754         struct net_device *dev = tp->dev;
14755         struct pci_dev *pdev = tp->pdev;
14756         struct device_node *dp = pci_device_to_OF_node(pdev);
14757         const unsigned char *addr;
14758         int len;
14759
14760         addr = of_get_property(dp, "local-mac-address", &len);
14761         if (addr && len == 6) {
14762                 memcpy(dev->dev_addr, addr, 6);
14763                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14764                 return 0;
14765         }
14766         return -ENODEV;
14767 }
14768
14769 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14770 {
14771         struct net_device *dev = tp->dev;
14772
14773         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14774         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14775         return 0;
14776 }
14777 #endif
14778
14779 static int __devinit tg3_get_device_address(struct tg3 *tp)
14780 {
14781         struct net_device *dev = tp->dev;
14782         u32 hi, lo, mac_offset;
14783         int addr_ok = 0;
14784
14785 #ifdef CONFIG_SPARC
14786         if (!tg3_get_macaddr_sparc(tp))
14787                 return 0;
14788 #endif
14789
14790         mac_offset = 0x7c;
14791         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14792             tg3_flag(tp, 5780_CLASS)) {
14793                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14794                         mac_offset = 0xcc;
14795                 if (tg3_nvram_lock(tp))
14796                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14797                 else
14798                         tg3_nvram_unlock(tp);
14799         } else if (tg3_flag(tp, 5717_PLUS)) {
14800                 if (tp->pci_fn & 1)
14801                         mac_offset = 0xcc;
14802                 if (tp->pci_fn > 1)
14803                         mac_offset += 0x18c;
14804         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14805                 mac_offset = 0x10;
14806
14807         /* First try to get it from MAC address mailbox. */
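        /* 0x484b is ASCII "HK"; bootcode leaves it in the upper half of
         * the high word to mark a valid MAC address in SRAM.
         */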
14808         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14809         if ((hi >> 16) == 0x484b) {
14810                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14811                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14812
14813                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14814                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14815                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14816                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14817                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14818
14819                 /* Some old bootcode may report a 0 MAC address in SRAM */
14820                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14821         }
14822         if (!addr_ok) {
14823                 /* Next, try NVRAM. */
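                /* The big-endian word at mac_offset holds MAC octets 0 and
                 * 1 in its low 16 bits, and the next word holds octets 2-5,
                 * hence the +2 byte offset into hi below.
                 */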
14824                 if (!tg3_flag(tp, NO_NVRAM) &&
14825                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14826                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14827                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14828                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14829                 }
14830                 /* Finally just fetch it out of the MAC control regs. */
14831                 else {
14832                         hi = tr32(MAC_ADDR_0_HIGH);
14833                         lo = tr32(MAC_ADDR_0_LOW);
14834
14835                         dev->dev_addr[5] = lo & 0xff;
14836                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14837                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14838                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14839                         dev->dev_addr[1] = hi & 0xff;
14840                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14841                 }
14842         }
14843
14844         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14845 #ifdef CONFIG_SPARC
14846                 if (!tg3_get_default_macaddr_sparc(tp))
14847                         return 0;
14848 #endif
14849                 return -EINVAL;
14850         }
14851         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14852         return 0;
14853 }
14854
14855 #define BOUNDARY_SINGLE_CACHELINE       1
14856 #define BOUNDARY_MULTI_CACHELINE        2
14857
14858 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14859 {
14860         int cacheline_size;
14861         u8 byte;
14862         int goal;
14863
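        /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words; a value of
         * zero means firmware never programmed it, so assume 1024 bytes.
         */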
14864         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14865         if (byte == 0)
14866                 cacheline_size = 1024;
14867         else
14868                 cacheline_size = (int) byte * 4;
14869
14870         /* On 5703 and later chips, the boundary bits have no
14871          * effect.
14872          */
14873         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14874             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14875             !tg3_flag(tp, PCI_EXPRESS))
14876                 goto out;
14877
14878 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14879         goal = BOUNDARY_MULTI_CACHELINE;
14880 #else
14881 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14882         goal = BOUNDARY_SINGLE_CACHELINE;
14883 #else
14884         goal = 0;
14885 #endif
14886 #endif
14887
14888         if (tg3_flag(tp, 57765_PLUS)) {
14889                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14890                 goto out;
14891         }
14892
14893         if (!goal)
14894                 goto out;
14895
14896         /* PCI controllers on most RISC systems tend to disconnect
14897          * when a device tries to burst across a cache-line boundary.
14898          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14899          *
14900          * Unfortunately, for PCI-E there are only limited
14901          * write-side controls for this, and thus for reads
14902          * we will still get the disconnects.  We'll also waste
14903          * these PCI cycles for both read and write for chips
14904          * other than 5700 and 5701 which do not implement the
14905          * boundary bits.
14906          */
14907         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14908                 switch (cacheline_size) {
14909                 case 16:
14910                 case 32:
14911                 case 64:
14912                 case 128:
14913                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14914                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14915                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14916                         } else {
14917                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14918                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14919                         }
14920                         break;
14921
14922                 case 256:
14923                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14924                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14925                         break;
14926
14927                 default:
14928                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14929                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14930                         break;
14931                 }
14932         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14933                 switch (cacheline_size) {
14934                 case 16:
14935                 case 32:
14936                 case 64:
14937                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14938                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14939                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14940                                 break;
14941                         }
14942                         /* fallthrough */
14943                 case 128:
14944                 default:
14945                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14946                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14947                         break;
14948                 }
14949         } else {
14950                 switch (cacheline_size) {
14951                 case 16:
14952                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14953                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14954                                         DMA_RWCTRL_WRITE_BNDRY_16);
14955                                 break;
14956                         }
14957                         /* fallthrough */
14958                 case 32:
14959                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14960                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14961                                         DMA_RWCTRL_WRITE_BNDRY_32);
14962                                 break;
14963                         }
14964                         /* fallthrough */
14965                 case 64:
14966                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14967                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14968                                         DMA_RWCTRL_WRITE_BNDRY_64);
14969                                 break;
14970                         }
14971                         /* fallthrough */
14972                 case 128:
14973                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14974                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14975                                         DMA_RWCTRL_WRITE_BNDRY_128);
14976                                 break;
14977                         }
14978                         /* fallthrough */
14979                 case 256:
14980                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14981                                 DMA_RWCTRL_WRITE_BNDRY_256);
14982                         break;
14983                 case 512:
14984                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14985                                 DMA_RWCTRL_WRITE_BNDRY_512);
14986                         break;
14987                 case 1024:
14988                 default:
14989                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14990                                 DMA_RWCTRL_WRITE_BNDRY_1024);
14991                         break;
14992                 }
14993         }
14994
14995 out:
14996         return val;
14997 }
14998
14999 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15000 {
15001         struct tg3_internal_buffer_desc test_desc;
15002         u32 sram_dma_descs;
15003         int i, ret;
15004
15005         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15006
15007         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15008         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15009         tw32(RDMAC_STATUS, 0);
15010         tw32(WDMAC_STATUS, 0);
15011
15012         tw32(BUFMGR_MODE, 0);
15013         tw32(FTQ_RESET, 0);
15014
15015         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15016         test_desc.addr_lo = buf_dma & 0xffffffff;
15017         test_desc.nic_mbuf = 0x00002100;
15018         test_desc.len = size;
15019
15020         /*
15021          * HP ZX1 systems saw test failures with 5701 cards running at
15022          * 33MHz the *second* time the tg3 driver was loaded after an
15023          * initial scan.
15024          *
15025          * Broadcom tells me:
15026          *   ...the DMA engine is connected to the GRC block and a DMA
15027          *   reset may affect the GRC block in some unpredictable way...
15028          *   The behavior of resets to individual blocks has not been tested.
15029          *
15030          * Broadcom noted the GRC reset will also reset all sub-components.
15031          */
15032         if (to_device) {
15033                 test_desc.cqid_sqid = (13 << 8) | 2;
15034
15035                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15036                 udelay(40);
15037         } else {
15038                 test_desc.cqid_sqid = (16 << 8) | 7;
15039
15040                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15041                 udelay(40);
15042         }
15043         test_desc.flags = 0x00000005;
15044
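        /* Copy the descriptor into NIC SRAM one 32-bit word at a time,
         * using the indirect memory window in PCI config space.
         */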
15045         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15046                 u32 val;
15047
15048                 val = *(((u32 *)&test_desc) + i);
15049                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15050                                        sram_dma_descs + (i * sizeof(u32)));
15051                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15052         }
15053         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15054
15055         if (to_device)
15056                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15057         else
15058                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15059
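        /* Poll the matching completion FIFO; allow the transfer up to
         * 4ms (40 iterations x 100us) to finish.
         */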
15060         ret = -ENODEV;
15061         for (i = 0; i < 40; i++) {
15062                 u32 val;
15063
15064                 if (to_device)
15065                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15066                 else
15067                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15068                 if ((val & 0xffff) == sram_dma_descs) {
15069                         ret = 0;
15070                         break;
15071                 }
15072
15073                 udelay(100);
15074         }
15075
15076         return ret;
15077 }
15078
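/* 8 KB scratch buffer used by the DMA engine self-test below. */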
15079 #define TEST_BUFFER_SIZE        0x2000
15080
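/* Chipsets known to expose the 5700/5701 write DMA bug even when the
 * DMA test passes; for these the 16-byte write boundary is forced.
 */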
15081 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15082         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15083         { },
15084 };
15085
15086 static int __devinit tg3_test_dma(struct tg3 *tp)
15087 {
15088         dma_addr_t buf_dma;
15089         u32 *buf, saved_dma_rwctrl;
15090         int ret = 0;
15091
15092         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15093                                  &buf_dma, GFP_KERNEL);
15094         if (!buf) {
15095                 ret = -ENOMEM;
15096                 goto out_nofree;
15097         }
15098
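        /* Seed dma_rwctrl with the default PCI write/read command codes;
         * tg3_calc_dma_bndry() then folds in the DMA boundary bits.
         */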
15099         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15100                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15101
15102         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15103
15104         if (tg3_flag(tp, 57765_PLUS))
15105                 goto out;
15106
15107         if (tg3_flag(tp, PCI_EXPRESS)) {
15108                 /* DMA read watermark not used on PCIE */
15109                 tp->dma_rwctrl |= 0x00180000;
15110         } else if (!tg3_flag(tp, PCIX_MODE)) {
15111                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15112                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15113                         tp->dma_rwctrl |= 0x003f0000;
15114                 else
15115                         tp->dma_rwctrl |= 0x003f000f;
15116         } else {
15117                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15118                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15119                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15120                         u32 read_water = 0x7;
15121
15122                         /* If the 5704 is behind the EPB bridge, we can
15123                          * do the less restrictive ONE_DMA workaround for
15124                          * better performance.
15125                          */
15126                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15127                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15128                                 tp->dma_rwctrl |= 0x8000;
15129                         else if (ccval == 0x6 || ccval == 0x7)
15130                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15131
15132                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15133                                 read_water = 4;
15134                         /* Set bit 23 to enable PCIX hw bug fix */
15135                         tp->dma_rwctrl |=
15136                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15137                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15138                                 (1 << 23);
15139                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15140                         /* 5780 always in PCIX mode */
15141                         tp->dma_rwctrl |= 0x00144000;
15142                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15143                         /* 5714 always in PCIX mode */
15144                         tp->dma_rwctrl |= 0x00148000;
15145                 } else {
15146                         tp->dma_rwctrl |= 0x001b000f;
15147                 }
15148         }
15149
15150         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15151             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15152                 tp->dma_rwctrl &= 0xfffffff0;
15153
15154         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15155             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15156                 /* Remove this if it causes problems for some boards. */
15157                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15158
15159                 /* On 5700/5701 chips, we need to set this bit.
15160                  * Otherwise the chip will issue cacheline transactions
15161                  * to streamable DMA memory without all of the byte
15162                  * enables turned on.  This is an error on several
15163                  * RISC PCI controllers, in particular sparc64.
15164                  *
15165                  * On 5703/5704 chips, this bit has been reassigned
15166                  * a different meaning.  In particular, it is used
15167                  * on those chips to enable a PCI-X workaround.
15168                  */
15169                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15170         }
15171
15172         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15173
15174 #if 0
15175         /* Unneeded, already done by tg3_get_invariants.  */
15176         tg3_switch_clocks(tp);
15177 #endif
15178
15179         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15180             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15181                 goto out;
15182
15183         /* It is best to perform the DMA test with the maximum write burst
15184          * size to expose the 5700/5701 write DMA bug.
15185          */
15186         saved_dma_rwctrl = tp->dma_rwctrl;
15187         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15188         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15189
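        /* Test loop: fill the buffer with a known pattern, DMA it to the
         * chip, DMA it back, and verify.  On a mismatch, retry once with
         * the conservative 16-byte write boundary before giving up.
         */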
15190         while (1) {
15191                 u32 *p = buf, i;
15192
15193                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15194                         p[i] = i;
15195
15196                 /* Send the buffer to the chip. */
15197                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15198                 if (ret) {
15199                         dev_err(&tp->pdev->dev,
15200                                 "%s: Buffer write failed. err = %d\n",
15201                                 __func__, ret);
15202                         break;
15203                 }
15204
15205 #if 0
15206                 /* validate data reached card RAM correctly. */
15207                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15208                         u32 val;
15209                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15210                         if (le32_to_cpu(val) != p[i]) {
15211                                 dev_err(&tp->pdev->dev,
15212                                         "%s: Buffer corrupted on device! "
15213                                         "(%u != %u)\n", __func__, le32_to_cpu(val), p[i]);
15214                                 /* ret = -ENODEV here? */
15215                         }
15216                         p[i] = 0;
15217                 }
15218 #endif
15219                 /* Now read it back. */
15220                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15221                 if (ret) {
15222                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15223                                 "err = %d\n", __func__, ret);
15224                         break;
15225                 }
15226
15227                 /* Verify it. */
15228                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15229                         if (p[i] == i)
15230                                 continue;
15231
15232                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15233                             DMA_RWCTRL_WRITE_BNDRY_16) {
15234                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15235                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15236                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15237                                 break;
15238                         } else {
15239                                 dev_err(&tp->pdev->dev,
15240                                         "%s: Buffer corrupted on read back! "
15241                                         "(%u != %u)\n", __func__, p[i], i);
15242                                 ret = -ENODEV;
15243                                 goto out;
15244                         }
15245                 }
15246
15247                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15248                         /* Success. */
15249                         ret = 0;
15250                         break;
15251                 }
15252         }
15253         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15254             DMA_RWCTRL_WRITE_BNDRY_16) {
15255                 /* The DMA test passed without adjusting the DMA
15256                  * boundary; now look for chipsets that are known to
15257                  * expose the DMA bug without failing the test.
15258                  */
15259                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15260                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15261                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15262                 } else {
15263                         /* Safe to use the calculated DMA boundary. */
15264                         tp->dma_rwctrl = saved_dma_rwctrl;
15265                 }
15266
15267                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15268         }
15269
15270 out:
15271         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15272 out_nofree:
15273         return ret;
15274 }
15275
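/* Choose buffer manager watermark defaults by chip family; standard and
 * jumbo frame thresholds are configured separately.
 */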
15276 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15277 {
15278         if (tg3_flag(tp, 57765_PLUS)) {
15279                 tp->bufmgr_config.mbuf_read_dma_low_water =
15280                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15281                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15282                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15283                 tp->bufmgr_config.mbuf_high_water =
15284                         DEFAULT_MB_HIGH_WATER_57765;
15285
15286                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15287                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15288                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15289                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15290                 tp->bufmgr_config.mbuf_high_water_jumbo =
15291                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15292         } else if (tg3_flag(tp, 5705_PLUS)) {
15293                 tp->bufmgr_config.mbuf_read_dma_low_water =
15294                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15295                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15296                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15297                 tp->bufmgr_config.mbuf_high_water =
15298                         DEFAULT_MB_HIGH_WATER_5705;
15299                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15300                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15301                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15302                         tp->bufmgr_config.mbuf_high_water =
15303                                 DEFAULT_MB_HIGH_WATER_5906;
15304                 }
15305
15306                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15307                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15308                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15309                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15310                 tp->bufmgr_config.mbuf_high_water_jumbo =
15311                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15312         } else {
15313                 tp->bufmgr_config.mbuf_read_dma_low_water =
15314                         DEFAULT_MB_RDMA_LOW_WATER;
15315                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15316                         DEFAULT_MB_MACRX_LOW_WATER;
15317                 tp->bufmgr_config.mbuf_high_water =
15318                         DEFAULT_MB_HIGH_WATER;
15319
15320                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15321                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15322                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15323                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15324                 tp->bufmgr_config.mbuf_high_water_jumbo =
15325                         DEFAULT_MB_HIGH_WATER_JUMBO;
15326         }
15327
15328         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15329         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15330 }
15331
15332 static char * __devinit tg3_phy_string(struct tg3 *tp)
15333 {
15334         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15335         case TG3_PHY_ID_BCM5400:        return "5400";
15336         case TG3_PHY_ID_BCM5401:        return "5401";
15337         case TG3_PHY_ID_BCM5411:        return "5411";
15338         case TG3_PHY_ID_BCM5701:        return "5701";
15339         case TG3_PHY_ID_BCM5703:        return "5703";
15340         case TG3_PHY_ID_BCM5704:        return "5704";
15341         case TG3_PHY_ID_BCM5705:        return "5705";
15342         case TG3_PHY_ID_BCM5750:        return "5750";
15343         case TG3_PHY_ID_BCM5752:        return "5752";
15344         case TG3_PHY_ID_BCM5714:        return "5714";
15345         case TG3_PHY_ID_BCM5780:        return "5780";
15346         case TG3_PHY_ID_BCM5755:        return "5755";
15347         case TG3_PHY_ID_BCM5787:        return "5787";
15348         case TG3_PHY_ID_BCM5784:        return "5784";
15349         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15350         case TG3_PHY_ID_BCM5906:        return "5906";
15351         case TG3_PHY_ID_BCM5761:        return "5761";
15352         case TG3_PHY_ID_BCM5718C:       return "5718C";
15353         case TG3_PHY_ID_BCM5718S:       return "5718S";
15354         case TG3_PHY_ID_BCM57765:       return "57765";
15355         case TG3_PHY_ID_BCM5719C:       return "5719C";
15356         case TG3_PHY_ID_BCM5720C:       return "5720C";
15357         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15358         case 0:                 return "serdes";
15359         default:                return "unknown";
15360         }
15361 }
15362
15363 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15364 {
15365         if (tg3_flag(tp, PCI_EXPRESS)) {
15366                 strcpy(str, "PCI Express");
15367                 return str;
15368         } else if (tg3_flag(tp, PCIX_MODE)) {
15369                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15370
15371                 strcpy(str, "PCIX:");
15372
15373                 if ((clock_ctrl == 7) ||
15374                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15375                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15376                         strcat(str, "133MHz");
15377                 else if (clock_ctrl == 0)
15378                         strcat(str, "33MHz");
15379                 else if (clock_ctrl == 2)
15380                         strcat(str, "50MHz");
15381                 else if (clock_ctrl == 4)
15382                         strcat(str, "66MHz");
15383                 else if (clock_ctrl == 6)
15384                         strcat(str, "100MHz");
15385         } else {
15386                 strcpy(str, "PCI:");
15387                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15388                         strcat(str, "66MHz");
15389                 else
15390                         strcat(str, "33MHz");
15391         }
15392         if (tg3_flag(tp, PCI_32BIT))
15393                 strcat(str, ":32-bit");
15394         else
15395                 strcat(str, ":64-bit");
15396         return str;
15397 }
15398
15399 static void __devinit tg3_init_coal(struct tg3 *tp)
15400 {
15401         struct ethtool_coalesce *ec = &tp->coal;
15402
15403         memset(ec, 0, sizeof(*ec));
15404         ec->cmd = ETHTOOL_GCOALESCE;
15405         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15406         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15407         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15408         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15409         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15410         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15411         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15412         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15413         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15414
15415         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15416                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15417                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15418                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15419                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15420                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15421         }
15422
15423         if (tg3_flag(tp, 5705_PLUS)) {
15424                 ec->rx_coalesce_usecs_irq = 0;
15425                 ec->tx_coalesce_usecs_irq = 0;
15426                 ec->stats_block_coalesce_usecs = 0;
15427         }
15428 }
15429
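/* Probe one tg3 device: enable and map the PCI device, read the chip
 * invariants, choose DMA masks, run the DMA self-test, and register
 * the net device.
 */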
15430 static int __devinit tg3_init_one(struct pci_dev *pdev,
15431                                   const struct pci_device_id *ent)
15432 {
15433         struct net_device *dev;
15434         struct tg3 *tp;
15435         int i, err, pm_cap;
15436         u32 sndmbx, rcvmbx, intmbx;
15437         char str[40];
15438         u64 dma_mask, persist_dma_mask;
15439         netdev_features_t features = 0;
15440
15441         printk_once(KERN_INFO "%s\n", version);
15442
15443         err = pci_enable_device(pdev);
15444         if (err) {
15445                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15446                 return err;
15447         }
15448
15449         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15450         if (err) {
15451                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15452                 goto err_out_disable_pdev;
15453         }
15454
15455         pci_set_master(pdev);
15456
15457         /* Find power-management capability. */
15458         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15459         if (pm_cap == 0) {
15460                 dev_err(&pdev->dev,
15461                         "Cannot find Power Management capability, aborting\n");
15462                 err = -EIO;
15463                 goto err_out_free_res;
15464         }
15465
15466         err = pci_set_power_state(pdev, PCI_D0);
15467         if (err) {
15468                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15469                 goto err_out_free_res;
15470         }
15471
15472         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15473         if (!dev) {
15474                 err = -ENOMEM;
15475                 goto err_out_power_down;
15476         }
15477
15478         SET_NETDEV_DEV(dev, &pdev->dev);
15479
15480         tp = netdev_priv(dev);
15481         tp->pdev = pdev;
15482         tp->dev = dev;
15483         tp->pm_cap = pm_cap;
15484         tp->rx_mode = TG3_DEF_RX_MODE;
15485         tp->tx_mode = TG3_DEF_TX_MODE;
15486
15487         if (tg3_debug > 0)
15488                 tp->msg_enable = tg3_debug;
15489         else
15490                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15491
15492         /* The word/byte swap controls here affect register access byte
15493          * swapping only.  DMA data byte swapping is controlled in the
15494          * GRC_MODE setting below.
15495          */
15496         tp->misc_host_ctrl =
15497                 MISC_HOST_CTRL_MASK_PCI_INT |
15498                 MISC_HOST_CTRL_WORD_SWAP |
15499                 MISC_HOST_CTRL_INDIR_ACCESS |
15500                 MISC_HOST_CTRL_PCISTATE_RW;
15501
15502         /* The NONFRM (non-frame) byte/word swap controls take effect
15503          * on descriptor entries, i.e. anything that isn't packet data.
15504          *
15505          * The StrongARM chips on the board (one for tx, one for rx)
15506          * run in big-endian mode.
15507          */
15508         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15509                         GRC_MODE_WSWAP_NONFRM_DATA);
15510 #ifdef __BIG_ENDIAN
15511         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15512 #endif
15513         spin_lock_init(&tp->lock);
15514         spin_lock_init(&tp->indirect_lock);
15515         INIT_WORK(&tp->reset_task, tg3_reset_task);
15516
15517         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15518         if (!tp->regs) {
15519                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15520                 err = -ENOMEM;
15521                 goto err_out_free_dev;
15522         }
15523
15524         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15525             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15526             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15527             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15528             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15529             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15530             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15531             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15532                 tg3_flag_set(tp, ENABLE_APE);
15533                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15534                 if (!tp->aperegs) {
15535                         dev_err(&pdev->dev,
15536                                 "Cannot map APE registers, aborting\n");
15537                         err = -ENOMEM;
15538                         goto err_out_iounmap;
15539                 }
15540         }
15541
15542         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15543         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15544
15545         dev->ethtool_ops = &tg3_ethtool_ops;
15546         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15547         dev->netdev_ops = &tg3_netdev_ops;
15548         dev->irq = pdev->irq;
15549
15550         err = tg3_get_invariants(tp);
15551         if (err) {
15552                 dev_err(&pdev->dev,
15553                         "Problem fetching invariants of chip, aborting\n");
15554                 goto err_out_apeunmap;
15555         }
15556
15557         /* The EPB bridge inside the 5714, 5715, and 5780, and any device
15558          * behind the EPB, cannot support DMA addresses above 40 bits.
15559          * On 64-bit systems with an IOMMU, use a 40-bit dma_mask.
15560          * On 64-bit systems without an IOMMU, use a 64-bit dma_mask and
15561          * do the DMA address check in tg3_start_xmit().
15562          */
15563         if (tg3_flag(tp, IS_5788))
15564                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15565         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15566                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15567 #ifdef CONFIG_HIGHMEM
15568                 dma_mask = DMA_BIT_MASK(64);
15569 #endif
15570         } else
15571                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15572
15573         /* Configure DMA attributes. */
15574         if (dma_mask > DMA_BIT_MASK(32)) {
15575                 err = pci_set_dma_mask(pdev, dma_mask);
15576                 if (!err) {
15577                         features |= NETIF_F_HIGHDMA;
15578                         err = pci_set_consistent_dma_mask(pdev,
15579                                                           persist_dma_mask);
15580                         if (err < 0) {
15581                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15582                                         "DMA for consistent allocations\n");
15583                                 goto err_out_apeunmap;
15584                         }
15585                 }
15586         }
15587         if (err || dma_mask == DMA_BIT_MASK(32)) {
15588                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15589                 if (err) {
15590                         dev_err(&pdev->dev,
15591                                 "No usable DMA configuration, aborting\n");
15592                         goto err_out_apeunmap;
15593                 }
15594         }
15595
15596         tg3_init_bufmgr_config(tp);
15597
15598         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15599
15600         /* 5700 B0 chips do not support checksumming correctly due
15601          * to hardware bugs.
15602          */
15603         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15604                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15605
15606                 if (tg3_flag(tp, 5755_PLUS))
15607                         features |= NETIF_F_IPV6_CSUM;
15608         }
15609
15610         /* TSO is on by default on chips that support hardware TSO.
15611          * Firmware TSO on older chips gives lower performance, so it
15612          * is off by default, but can be enabled using ethtool.
15613          */
15614         if ((tg3_flag(tp, HW_TSO_1) ||
15615              tg3_flag(tp, HW_TSO_2) ||
15616              tg3_flag(tp, HW_TSO_3)) &&
15617             (features & NETIF_F_IP_CSUM))
15618                 features |= NETIF_F_TSO;
15619         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15620                 if (features & NETIF_F_IPV6_CSUM)
15621                         features |= NETIF_F_TSO6;
15622                 if (tg3_flag(tp, HW_TSO_3) ||
15623                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15624                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15625                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15626                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15627                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15628                         features |= NETIF_F_TSO_ECN;
15629         }
15630
15631         dev->features |= features;
15632         dev->vlan_features |= features;
15633
15634         /*
15635          * Add loopback capability only for a subset of devices that support
15636          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15637          * loopback for the remaining devices.
15638          */
15639         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15640             !tg3_flag(tp, CPMU_PRESENT))
15641                 /* Add the loopback capability */
15642                 features |= NETIF_F_LOOPBACK;
15643
15644         dev->hw_features |= features;
15645
15646         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15647             !tg3_flag(tp, TSO_CAPABLE) &&
15648             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15649                 tg3_flag_set(tp, MAX_RXPEND_64);
15650                 tp->rx_pending = 63;
15651         }
15652
15653         err = tg3_get_device_address(tp);
15654         if (err) {
15655                 dev_err(&pdev->dev,
15656                         "Could not obtain valid ethernet address, aborting\n");
15657                 goto err_out_apeunmap;
15658         }
15659
15660         /*
15661          * Reset the chip in case a UNDI or EFI driver did not shut it
15662          * down cleanly.  The DMA self test will enable WDMAC, and we
15663          * would otherwise see (spurious) pending DMA on the PCI bus.
15664          */
15665         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15666             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15667                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15668                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15669         }
15670
15671         err = tg3_test_dma(tp);
15672         if (err) {
15673                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15674                 goto err_out_apeunmap;
15675         }
15676
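        /* Set up per-vector NAPI state.  Vector 0 uses the default
         * mailbox offsets assigned here; with MSI-X, later vectors step
         * through the interrupt, receive-return, and send-producer
         * mailbox ranges.
         */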
15677         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15678         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15679         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15680         for (i = 0; i < tp->irq_max; i++) {
15681                 struct tg3_napi *tnapi = &tp->napi[i];
15682
15683                 tnapi->tp = tp;
15684                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15685
15686                 tnapi->int_mbox = intmbx;
15687                 if (i <= 4)
15688                         intmbx += 0x8;
15689                 else
15690                         intmbx += 0x4;
15691
15692                 tnapi->consmbox = rcvmbx;
15693                 tnapi->prodmbox = sndmbx;
15694
15695                 if (i)
15696                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15697                 else
15698                         tnapi->coal_now = HOSTCC_MODE_NOW;
15699
15700                 if (!tg3_flag(tp, SUPPORT_MSIX))
15701                         break;
15702
15703                 /*
15704                  * If we support MSI-X, we'll be using RSS.  If we're using
15705                  * RSS, the first vector only handles link interrupts and the
15706                  * remaining vectors handle rx and tx interrupts.  Reuse the
15707                  * mailbox values for the next iteration.  The values we set
15708                  * up above are still useful for the single-vector mode.
15709                  */
15710                 if (!i)
15711                         continue;
15712
15713                 rcvmbx += 0x8;
15714
15715                 if (sndmbx & 0x4)
15716                         sndmbx -= 0x4;
15717                 else
15718                         sndmbx += 0xc;
15719         }
15720
15721         tg3_init_coal(tp);
15722
15723         pci_set_drvdata(pdev, dev);
15724
15725         if (tg3_flag(tp, 5717_PLUS)) {
15726                 /* Resume a low-power mode */
15727                 tg3_frob_aux_power(tp, false);
15728         }
15729
15730         tg3_timer_init(tp);
15731
15732         err = register_netdev(dev);
15733         if (err) {
15734                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15735                 goto err_out_apeunmap;
15736         }
15737
15738         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15739                     tp->board_part_number,
15740                     tp->pci_chip_rev_id,
15741                     tg3_bus_string(tp, str),
15742                     dev->dev_addr);
15743
15744         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15745                 struct phy_device *phydev;
15746                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15747                 netdev_info(dev,
15748                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15749                             phydev->drv->name, dev_name(&phydev->dev));
15750         } else {
15751                 char *ethtype;
15752
15753                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15754                         ethtype = "10/100Base-TX";
15755                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15756                         ethtype = "1000Base-SX";
15757                 else
15758                         ethtype = "10/100/1000Base-T";
15759
15760                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15761                             "(WireSpeed[%d], EEE[%d])\n",
15762                             tg3_phy_string(tp), ethtype,
15763                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15764                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15765         }
15766
15767         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15768                     (dev->features & NETIF_F_RXCSUM) != 0,
15769                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15770                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15771                     tg3_flag(tp, ENABLE_ASF) != 0,
15772                     tg3_flag(tp, TSO_CAPABLE) != 0);
15773         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15774                     tp->dma_rwctrl,
15775                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15776                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15777
15778         pci_save_state(pdev);
15779
15780         return 0;
15781
15782 err_out_apeunmap:
15783         if (tp->aperegs) {
15784                 iounmap(tp->aperegs);
15785                 tp->aperegs = NULL;
15786         }
15787
15788 err_out_iounmap:
15789         if (tp->regs) {
15790                 iounmap(tp->regs);
15791                 tp->regs = NULL;
15792         }
15793
15794 err_out_free_dev:
15795         free_netdev(dev);
15796
15797 err_out_power_down:
15798         pci_set_power_state(pdev, PCI_D3hot);
15799
15800 err_out_free_res:
15801         pci_release_regions(pdev);
15802
15803 err_out_disable_pdev:
15804         pci_disable_device(pdev);
15805         pci_set_drvdata(pdev, NULL);
15806         return err;
15807 }
15808
15809 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15810 {
15811         struct net_device *dev = pci_get_drvdata(pdev);
15812
15813         if (dev) {
15814                 struct tg3 *tp = netdev_priv(dev);
15815
15816                 if (tp->fw)
15817                         release_firmware(tp->fw);
15818
15819                 tg3_reset_task_cancel(tp);
15820
15821                 if (tg3_flag(tp, USE_PHYLIB)) {
15822                         tg3_phy_fini(tp);
15823                         tg3_mdio_fini(tp);
15824                 }
15825
15826                 unregister_netdev(dev);
15827                 if (tp->aperegs) {
15828                         iounmap(tp->aperegs);
15829                         tp->aperegs = NULL;
15830                 }
15831                 if (tp->regs) {
15832                         iounmap(tp->regs);
15833                         tp->regs = NULL;
15834                 }
15835                 free_netdev(dev);
15836                 pci_release_regions(pdev);
15837                 pci_disable_device(pdev);
15838                 pci_set_drvdata(pdev, NULL);
15839         }
15840 }
15841
15842 #ifdef CONFIG_PM_SLEEP
15843 static int tg3_suspend(struct device *device)
15844 {
15845         struct pci_dev *pdev = to_pci_dev(device);
15846         struct net_device *dev = pci_get_drvdata(pdev);
15847         struct tg3 *tp = netdev_priv(dev);
15848         int err;
15849
15850         if (!netif_running(dev))
15851                 return 0;
15852
15853         tg3_reset_task_cancel(tp);
15854         tg3_phy_stop(tp);
15855         tg3_netif_stop(tp);
15856
15857         tg3_timer_stop(tp);
15858
15859         tg3_full_lock(tp, 1);
15860         tg3_disable_ints(tp);
15861         tg3_full_unlock(tp);
15862
15863         netif_device_detach(dev);
15864
15865         tg3_full_lock(tp, 0);
15866         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15867         tg3_flag_clear(tp, INIT_COMPLETE);
15868         tg3_full_unlock(tp);
15869
15870         err = tg3_power_down_prepare(tp);
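        /* If power-down preparation failed, restart the hardware and
         * reattach the interface so the device remains usable.
         */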
15871         if (err) {
15872                 int err2;
15873
15874                 tg3_full_lock(tp, 0);
15875
15876                 tg3_flag_set(tp, INIT_COMPLETE);
15877                 err2 = tg3_restart_hw(tp, 1);
15878                 if (err2)
15879                         goto out;
15880
15881                 tg3_timer_start(tp);
15882
15883                 netif_device_attach(dev);
15884                 tg3_netif_start(tp);
15885
15886 out:
15887                 tg3_full_unlock(tp);
15888
15889                 if (!err2)
15890                         tg3_phy_start(tp);
15891         }
15892
15893         return err;
15894 }
15895
15896 static int tg3_resume(struct device *device)
15897 {
15898         struct pci_dev *pdev = to_pci_dev(device);
15899         struct net_device *dev = pci_get_drvdata(pdev);
15900         struct tg3 *tp = netdev_priv(dev);
15901         int err;
15902
15903         if (!netif_running(dev))
15904                 return 0;
15905
15906         netif_device_attach(dev);
15907
15908         tg3_full_lock(tp, 0);
15909
15910         tg3_flag_set(tp, INIT_COMPLETE);
15911         err = tg3_restart_hw(tp, 1);
15912         if (err)
15913                 goto out;
15914
15915         tg3_timer_start(tp);
15916
15917         tg3_netif_start(tp);
15918
15919 out:
15920         tg3_full_unlock(tp);
15921
15922         if (!err)
15923                 tg3_phy_start(tp);
15924
15925         return err;
15926 }
15927
15928 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15929 #define TG3_PM_OPS (&tg3_pm_ops)
15930
15931 #else
15932
15933 #define TG3_PM_OPS NULL
15934
15935 #endif /* CONFIG_PM_SLEEP */
15936
15937 /**
15938  * tg3_io_error_detected - called when PCI error is detected
15939  * @pdev: Pointer to PCI device
15940  * @state: The current PCI connection state
15941  *
15942  * This function is called after a PCI bus error affecting
15943  * this device has been detected.
15944  */
15945 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15946                                               pci_channel_state_t state)
15947 {
15948         struct net_device *netdev = pci_get_drvdata(pdev);
15949         struct tg3 *tp = netdev_priv(netdev);
15950         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15951
15952         netdev_info(netdev, "PCI I/O error detected\n");
15953
15954         rtnl_lock();
15955
15956         if (!netif_running(netdev))
15957                 goto done;
15958
15959         tg3_phy_stop(tp);
15960
15961         tg3_netif_stop(tp);
15962
15963         tg3_timer_stop(tp);
15964
15965         /* Want to make sure that the reset task doesn't run */
15966         tg3_reset_task_cancel(tp);
15967
15968         netif_device_detach(netdev);
15969
15970         /* Clean up software state, even if MMIO is blocked */
15971         tg3_full_lock(tp, 0);
15972         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15973         tg3_full_unlock(tp);
15974
15975 done:
15976         if (state == pci_channel_io_perm_failure)
15977                 err = PCI_ERS_RESULT_DISCONNECT;
15978         else
15979                 pci_disable_device(pdev);
15980
15981         rtnl_unlock();
15982
15983         return err;
15984 }
15985
15986 /**
15987  * tg3_io_slot_reset - called after the PCI bus has been reset.
15988  * @pdev: Pointer to PCI device
15989  *
15990  * Restart the card from scratch, as if from a cold-boot.
15991  * At this point, the card has experienced a hard reset,
15992  * followed by fixups by BIOS, and has its config space
15993  * set up identically to what it was at cold boot.
15994  */
15995 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15996 {
15997         struct net_device *netdev = pci_get_drvdata(pdev);
15998         struct tg3 *tp = netdev_priv(netdev);
15999         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16000         int err;
16001
16002         rtnl_lock();
16003
16004         if (pci_enable_device(pdev)) {
16005                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16006                 goto done;
16007         }
16008
16009         pci_set_master(pdev);
16010         pci_restore_state(pdev);
16011         pci_save_state(pdev);
16012
16013         if (!netif_running(netdev)) {
16014                 rc = PCI_ERS_RESULT_RECOVERED;
16015                 goto done;
16016         }
16017
16018         err = tg3_power_up(tp);
16019         if (err)
16020                 goto done;
16021
16022         rc = PCI_ERS_RESULT_RECOVERED;
16023
16024 done:
16025         rtnl_unlock();
16026
16027         return rc;
16028 }
16029
16030 /**
16031  * tg3_io_resume - called when traffic can start flowing again.
16032  * @pdev: Pointer to PCI device
16033  *
16034  * This callback is called when the error recovery driver tells
16035  * us that it's OK to resume normal operation.
16036  */
16037 static void tg3_io_resume(struct pci_dev *pdev)
16038 {
16039         struct net_device *netdev = pci_get_drvdata(pdev);
16040         struct tg3 *tp = netdev_priv(netdev);
16041         int err;
16042
16043         rtnl_lock();
16044
16045         if (!netif_running(netdev))
16046                 goto done;
16047
16048         tg3_full_lock(tp, 0);
16049         tg3_flag_set(tp, INIT_COMPLETE);
16050         err = tg3_restart_hw(tp, 1);
16051         tg3_full_unlock(tp);
16052         if (err) {
16053                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16054                 goto done;
16055         }
16056
16057         netif_device_attach(netdev);
16058
16059         tg3_timer_start(tp);
16060
16061         tg3_netif_start(tp);
16062
16063         tg3_phy_start(tp);
16064
16065 done:
16066         rtnl_unlock();
16067 }
16068
16069 static struct pci_error_handlers tg3_err_handler = {
16070         .error_detected = tg3_io_error_detected,
16071         .slot_reset     = tg3_io_slot_reset,
16072         .resume         = tg3_io_resume
16073 };
16074
16075 static struct pci_driver tg3_driver = {
16076         .name           = DRV_MODULE_NAME,
16077         .id_table       = tg3_pci_tbl,
16078         .probe          = tg3_init_one,
16079         .remove         = __devexit_p(tg3_remove_one),
16080         .err_handler    = &tg3_err_handler,
16081         .driver.pm      = TG3_PM_OPS,
16082 };
16083
16084 static int __init tg3_init(void)
16085 {
16086         return pci_register_driver(&tg3_driver);
16087 }
16088
16089 static void __exit tg3_cleanup(void)
16090 {
16091         pci_unregister_driver(&tg3_driver);
16092 }
16093
16094 module_init(tg3_init);
16095 module_exit(tg3_cleanup);