/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     128
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "December 03, 2012"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
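/* For example, with TG3_TX_RING_SIZE == 512 (a power of two),
 * (idx + 1) % 512 is equivalent to (idx + 1) & 511; the NEXT_TX()
 * macro below uses exactly that mask form to advance a ring index
 * without a divide.
 */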

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
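/* Illustrative example: "modprobe tg3 tg3_debug=0x3" would enable the
 * NETIF_MSG_DRV and NETIF_MSG_PROBE classes; the default of -1 selects
 * TG3_DEF_MSG_ENABLE above.
 */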

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)

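/* Register accessors: the simple variants below are direct MMIO reads
 * and writes through the tp->regs and tp->aperegs mappings.
 */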
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

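/* Indirect register access: the target offset is written to a window
 * register in PCI config space (TG3PCI_REG_BASE_ADDR) and the data
 * moves through TG3PCI_REG_DATA, serialized by tp->indirect_lock.
 */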
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

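/* Shorthand wrappers around the accessor method pointers in struct tg3,
 * so chip-specific quirks (indirect access, write flushing, mailbox
 * workarounds) stay hidden behind a single call syntax.
 */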
#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

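/* APE lock handling: the APE management firmware and the host driver
 * arbitrate shared resources through per-lock request/grant registers.
 * tg3_ape_lock_init() clears any grants this function may still hold
 * from a previous driver instance.
 */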
static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver isn't holding any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }

}

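/* Acquire one of the APE hardware semaphores: post a request bit and
 * poll the grant register for up to 1 ms; returns 0 on success or
 * -EBUSY if the grant never arrives.
 */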
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
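                /* else: fall through */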
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
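                /* else: fall through */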
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

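/* Read from the APE scratchpad through the shared-memory message buffer:
 * each chunk posts a SCRTCHPD_READ driver event, waits for the APE to
 * service it, then copies the returned words out of the message area.
 */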
static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

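/* Tell the APE firmware about driver state transitions (init, shutdown,
 * suspend), e.g. so it can switch to OS-absent or WoL behavior.
 */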
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

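/* Consult the status block for this vector: a link change, TX
 * completion, or outstanding RX descriptors all count as pending work.
 */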
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

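/* MII management access goes through the MAC_MI_COM register: compose a
 * frame from the PHY address, register number, and command, start it,
 * then poll MI_COM_BUSY for up to PHY_BUSY_LOOPS iterations.  Hardware
 * auto-polling is paused for the duration and the PHY APE lock is held.
 */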
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

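/* Clause 45 register access implemented over Clause 22: the MMD
 * control/address register pair (the IEEE 802.3 Annex 22D indirection)
 * selects the device address and register before the data transfer.
 */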
static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB);

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

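/* phylib mii_bus callbacks: thin wrappers around tg3_readphy() and
 * tg3_writephy() that take tp->lock, since phylib may call in
 * concurrently with the driver's own PHY accesses.
 */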
static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
1500          * quickest way to bring the device back to an operational state.
1501          */
1502         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1503                 tg3_bmcr_reset(tp);
1504
1505         i = mdiobus_register(tp->mdio_bus);
1506         if (i) {
1507                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1508                 mdiobus_free(tp->mdio_bus);
1509                 return i;
1510         }
1511
1512         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1513
1514         if (!phydev || !phydev->drv) {
1515                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1516                 mdiobus_unregister(tp->mdio_bus);
1517                 mdiobus_free(tp->mdio_bus);
1518                 return -ENODEV;
1519         }
1520
1521         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1522         case PHY_ID_BCM57780:
1523                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1524                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1525                 break;
1526         case PHY_ID_BCM50610:
1527         case PHY_ID_BCM50610M:
1528                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1529                                      PHY_BRCM_RX_REFCLK_UNUSED |
1530                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1531                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1532                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1533                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1534                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1535                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1536                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1537                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1538                 /* fallthru */
1539         case PHY_ID_RTL8211C:
1540                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1541                 break;
1542         case PHY_ID_RTL8201E:
1543         case PHY_ID_BCMAC131:
1544                 phydev->interface = PHY_INTERFACE_MODE_MII;
1545                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1546                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1547                 break;
1548         }
1549
1550         tg3_flag_set(tp, MDIOBUS_INITED);
1551
1552         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1553                 tg3_mdio_config_5785(tp);
1554
1555         return 0;
1556 }
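The bus id built in tg3_mdio_init() above folds the PCI bus number and devfn into one hex token so each tg3 MDIO bus gets a unique name. A standalone userspace sketch of the same formatting, with hypothetical bus/devfn values and an illustrative buffer size (the real code sizes it with MII_BUS_ID_SIZE):

#include <stdio.h>

int main(void)
{
	unsigned int bus = 0x02, devfn = 0x08;	/* hypothetical PCI bus/devfn */
	char id[17];				/* illustrative MII_BUS_ID_SIZE */

	snprintf(id, sizeof(id), "%x", (bus << 8) | devfn);
	printf("mdio bus id = \"%s\"\n", id);	/* prints "208" */
	return 0;
}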
1557
1558 static void tg3_mdio_fini(struct tg3 *tp)
1559 {
1560         if (tg3_flag(tp, MDIOBUS_INITED)) {
1561                 tg3_flag_clear(tp, MDIOBUS_INITED);
1562                 mdiobus_unregister(tp->mdio_bus);
1563                 mdiobus_free(tp->mdio_bus);
1564         }
1565 }
1566
1567 /* tp->lock is held. */
1568 static inline void tg3_generate_fw_event(struct tg3 *tp)
1569 {
1570         u32 val;
1571
1572         val = tr32(GRC_RX_CPU_EVENT);
1573         val |= GRC_RX_CPU_DRIVER_EVENT;
1574         tw32_f(GRC_RX_CPU_EVENT, val);
1575
1576         tp->last_event_jiffies = jiffies;
1577 }
1578
1579 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1580
1581 /* tp->lock is held. */
1582 static void tg3_wait_for_event_ack(struct tg3 *tp)
1583 {
1584         int i;
1585         unsigned int delay_cnt;
1586         long time_remain;
1587
1588         /* If enough time has passed, no wait is necessary. */
1589         time_remain = (long)(tp->last_event_jiffies + 1 +
1590                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1591                       (long)jiffies;
1592         if (time_remain < 0)
1593                 return;
1594
1595         /* Check if we can shorten the wait time. */
1596         delay_cnt = jiffies_to_usecs(time_remain);
1597         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1598                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1599         delay_cnt = (delay_cnt >> 3) + 1;
1600
1601         for (i = 0; i < delay_cnt; i++) {
1602                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1603                         break;
1604                 udelay(8);
1605         }
1606 }
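The deadline math in tg3_wait_for_event_ack() uses the classic signed-subtraction idiom so the comparison stays correct even when jiffies wraps around. A minimal userspace sketch of the same trick, assuming a 32-bit tick counter:

#include <stdio.h>
#include <stdint.h>

static int32_t time_remaining(uint32_t deadline, uint32_t now)
{
	/* Unsigned subtract, then reinterpret as signed: a small positive
	 * result means "still in the future", even across a wrap. */
	return (int32_t)(deadline - now);
}

int main(void)
{
	uint32_t now = 0xfffffff0u;		/* counter about to wrap */
	uint32_t deadline = now + 0x20;		/* wraps to 0x00000010 */

	printf("remaining = %d ticks\n", (int)time_remaining(deadline, now));
	/* prints 32: the deadline is still ahead despite deadline < now */
	return 0;
}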
1607
1608 /* tp->lock is held. */
1609 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1610 {
1611         u32 reg, val;
1612
1613         val = 0;
1614         if (!tg3_readphy(tp, MII_BMCR, &reg))
1615                 val = reg << 16;
1616         if (!tg3_readphy(tp, MII_BMSR, &reg))
1617                 val |= (reg & 0xffff);
1618         *data++ = val;
1619
1620         val = 0;
1621         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1622                 val = reg << 16;
1623         if (!tg3_readphy(tp, MII_LPA, &reg))
1624                 val |= (reg & 0xffff);
1625         *data++ = val;
1626
1627         val = 0;
1628         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1629                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1630                         val = reg << 16;
1631                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1632                         val |= (reg & 0xffff);
1633         }
1634         *data++ = val;
1635
1636         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1637                 val = reg << 16;
1638         else
1639                 val = 0;
1640         *data++ = val;
1641 }
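Each word handed to the firmware above packs a pair of 16-bit MII registers, the control-type register in the high half and its companion in the low half. A tiny standalone illustration with made-up register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t bmcr = 0x1140;		/* hypothetical MII_BMCR contents */
	uint16_t bmsr = 0x796d;		/* hypothetical MII_BMSR contents */
	uint32_t word = ((uint32_t)bmcr << 16) | bmsr;

	printf("packed word = 0x%08x\n", (unsigned int)word);	/* 0x1140796d */
	printf("bmcr = 0x%04x, bmsr = 0x%04x\n",
	       (unsigned int)(word >> 16), (unsigned int)(word & 0xffff));
	return 0;
}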
1642
1643 /* tp->lock is held. */
1644 static void tg3_ump_link_report(struct tg3 *tp)
1645 {
1646         u32 data[4];
1647
1648         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1649                 return;
1650
1651         tg3_phy_gather_ump_data(tp, data);
1652
1653         tg3_wait_for_event_ack(tp);
1654
1655         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1656         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1657         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1658         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1659         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1660         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1661
1662         tg3_generate_fw_event(tp);
1663 }
1664
1665 /* tp->lock is held. */
1666 static void tg3_stop_fw(struct tg3 *tp)
1667 {
1668         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1669                 /* Wait for RX cpu to ACK the previous event. */
1670                 tg3_wait_for_event_ack(tp);
1671
1672                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1673
1674                 tg3_generate_fw_event(tp);
1675
1676                 /* Wait for RX cpu to ACK this event. */
1677                 tg3_wait_for_event_ack(tp);
1678         }
1679 }
1680
1681 /* tp->lock is held. */
1682 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1683 {
1684         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1685                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1686
1687         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1688                 switch (kind) {
1689                 case RESET_KIND_INIT:
1690                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1691                                       DRV_STATE_START);
1692                         break;
1693
1694                 case RESET_KIND_SHUTDOWN:
1695                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1696                                       DRV_STATE_UNLOAD);
1697                         break;
1698
1699                 case RESET_KIND_SUSPEND:
1700                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1701                                       DRV_STATE_SUSPEND);
1702                         break;
1703
1704                 default:
1705                         break;
1706                 }
1707         }
1708
1709         if (kind == RESET_KIND_INIT ||
1710             kind == RESET_KIND_SUSPEND)
1711                 tg3_ape_driver_state_change(tp, kind);
1712 }
1713
1714 /* tp->lock is held. */
1715 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1716 {
1717         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1718                 switch (kind) {
1719                 case RESET_KIND_INIT:
1720                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1721                                       DRV_STATE_START_DONE);
1722                         break;
1723
1724                 case RESET_KIND_SHUTDOWN:
1725                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1726                                       DRV_STATE_UNLOAD_DONE);
1727                         break;
1728
1729                 default:
1730                         break;
1731                 }
1732         }
1733
1734         if (kind == RESET_KIND_SHUTDOWN)
1735                 tg3_ape_driver_state_change(tp, kind);
1736 }
1737
1738 /* tp->lock is held. */
1739 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1740 {
1741         if (tg3_flag(tp, ENABLE_ASF)) {
1742                 switch (kind) {
1743                 case RESET_KIND_INIT:
1744                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745                                       DRV_STATE_START);
1746                         break;
1747
1748                 case RESET_KIND_SHUTDOWN:
1749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750                                       DRV_STATE_UNLOAD);
1751                         break;
1752
1753                 case RESET_KIND_SUSPEND:
1754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755                                       DRV_STATE_SUSPEND);
1756                         break;
1757
1758                 default:
1759                         break;
1760                 }
1761         }
1762 }
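tg3_write_sig_pre_reset() and tg3_write_sig_legacy() both boil down to the same kind-to-state mapping, with *_DONE variants on the post-reset side. A small illustrative table (state names only, not the real mailbox writes) makes that relationship explicit:

#include <stdio.h>

enum { RESET_INIT, RESET_SHUTDOWN, RESET_SUSPEND };

static const char * const pre_reset_state[] = {
	[RESET_INIT]     = "DRV_STATE_START",
	[RESET_SHUTDOWN] = "DRV_STATE_UNLOAD",
	[RESET_SUSPEND]  = "DRV_STATE_SUSPEND",
};

int main(void)
{
	for (int kind = RESET_INIT; kind <= RESET_SUSPEND; kind++)
		printf("kind %d -> %s\n", kind, pre_reset_state[kind]);
	return 0;
}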
1763
1764 static int tg3_poll_fw(struct tg3 *tp)
1765 {
1766         int i;
1767         u32 val;
1768
1769         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1770                 /* Wait up to 20ms for init done. */
1771                 for (i = 0; i < 200; i++) {
1772                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1773                                 return 0;
1774                         udelay(100);
1775                 }
1776                 return -ENODEV;
1777         }
1778
1779         /* Wait for firmware initialization to complete. */
1780         for (i = 0; i < 100000; i++) {
1781                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1782                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1783                         break;
1784                 udelay(10);
1785         }
1786
1787         /* Chip might not be fitted with firmware.  Some Sun onboard
1788          * parts are configured like that.  So don't signal the timeout
1789          * of the above loop as an error, but do report the lack of
1790          * running firmware once.
1791          */
1792         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1793                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1794
1795                 netdev_info(tp->dev, "No firmware running\n");
1796         }
1797
1798         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1799                 /* The 57765 A0 needs a little more
1800                  * time to do some important work.
1801                  */
1802                 mdelay(10);
1803         }
1804
1805         return 0;
1806 }
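The main poll loop in tg3_poll_fw() succeeds when the firmware writes back the one's complement of the magic value the driver posted before the reset. A sketch of that handshake with a placeholder magic (the real NIC_SRAM_FIRMWARE_MBOX_MAGIC1 constant lives in tg3.h):

#include <stdio.h>
#include <stdint.h>

#define MBOX_MAGIC 0x12345678u	/* placeholder, not the real constant */

int main(void)
{
	uint32_t posted = MBOX_MAGIC;	/* driver writes this pre-reset */
	uint32_t reply = ~posted;	/* firmware acks with the complement */

	if (reply == ~MBOX_MAGIC)
		printf("firmware init complete\n");
	return 0;
}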
1807
1808 static void tg3_link_report(struct tg3 *tp)
1809 {
1810         if (!netif_carrier_ok(tp->dev)) {
1811                 netif_info(tp, link, tp->dev, "Link is down\n");
1812                 tg3_ump_link_report(tp);
1813         } else if (netif_msg_link(tp)) {
1814                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1815                             (tp->link_config.active_speed == SPEED_1000 ?
1816                              1000 :
1817                              (tp->link_config.active_speed == SPEED_100 ?
1818                               100 : 10)),
1819                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1820                              "full" : "half"));
1821
1822                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1823                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1824                             "on" : "off",
1825                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1826                             "on" : "off");
1827
1828                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1829                         netdev_info(tp->dev, "EEE is %s\n",
1830                                     tp->setlpicnt ? "enabled" : "disabled");
1831
1832                 tg3_ump_link_report(tp);
1833         }
1834 }
1835
1836 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1837 {
1838         u16 miireg;
1839
1840         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1841                 miireg = ADVERTISE_1000XPAUSE;
1842         else if (flow_ctrl & FLOW_CTRL_TX)
1843                 miireg = ADVERTISE_1000XPSE_ASYM;
1844         else if (flow_ctrl & FLOW_CTRL_RX)
1845                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1846         else
1847                 miireg = 0;
1848
1849         return miireg;
1850 }
1851
1852 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1853 {
1854         u8 cap = 0;
1855
1856         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1857                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1858         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1859                 if (lcladv & ADVERTISE_1000XPAUSE)
1860                         cap = FLOW_CTRL_RX;
1861                 if (rmtadv & ADVERTISE_1000XPAUSE)
1862                         cap = FLOW_CTRL_TX;
1863         }
1864
1865         return cap;
1866 }
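The two helpers above encode and decode the 1000BASE-X pause bits: symmetric pause wins outright, and the asymmetric bit arbitrates who pauses whom. A standalone check of one asymmetric case (constants mirror the mii.h values but are redefined here so the example compiles on its own):

#include <stdio.h>

#define ADV_1000XPAUSE		0x0080	/* mirrors ADVERTISE_1000XPAUSE */
#define ADV_1000XPSE_ASYM	0x0100	/* mirrors ADVERTISE_1000XPSE_ASYM */
#define FC_TX			0x01
#define FC_RX			0x02

static unsigned char resolve(unsigned short lcl, unsigned short rmt)
{
	unsigned char cap = 0;

	if (lcl & rmt & ADV_1000XPAUSE) {
		cap = FC_TX | FC_RX;		/* symmetric pause */
	} else if (lcl & rmt & ADV_1000XPSE_ASYM) {
		if (lcl & ADV_1000XPAUSE)
			cap = FC_RX;		/* local end gets RX pause */
		if (rmt & ADV_1000XPAUSE)
			cap = FC_TX;		/* local end gets TX pause */
	}
	return cap;
}

int main(void)
{
	unsigned short lcl = ADV_1000XPAUSE | ADV_1000XPSE_ASYM;
	unsigned short rmt = ADV_1000XPSE_ASYM;

	printf("resolved = 0x%x\n", resolve(lcl, rmt));	/* 0x2: RX only */
	return 0;
}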
1867
1868 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1869 {
1870         u8 autoneg;
1871         u8 flowctrl = 0;
1872         u32 old_rx_mode = tp->rx_mode;
1873         u32 old_tx_mode = tp->tx_mode;
1874
1875         if (tg3_flag(tp, USE_PHYLIB))
1876                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1877         else
1878                 autoneg = tp->link_config.autoneg;
1879
1880         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1881                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1882                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1883                 else
1884                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1885         } else
1886                 flowctrl = tp->link_config.flowctrl;
1887
1888         tp->link_config.active_flowctrl = flowctrl;
1889
1890         if (flowctrl & FLOW_CTRL_RX)
1891                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1892         else
1893                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1894
1895         if (old_rx_mode != tp->rx_mode)
1896                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1897
1898         if (flowctrl & FLOW_CTRL_TX)
1899                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1900         else
1901                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1902
1903         if (old_tx_mode != tp->tx_mode)
1904                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1905 }
1906
1907 static void tg3_adjust_link(struct net_device *dev)
1908 {
1909         u8 oldflowctrl, linkmesg = 0;
1910         u32 mac_mode, lcl_adv, rmt_adv;
1911         struct tg3 *tp = netdev_priv(dev);
1912         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1913
1914         spin_lock_bh(&tp->lock);
1915
1916         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1917                                     MAC_MODE_HALF_DUPLEX);
1918
1919         oldflowctrl = tp->link_config.active_flowctrl;
1920
1921         if (phydev->link) {
1922                 lcl_adv = 0;
1923                 rmt_adv = 0;
1924
1925                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1926                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1927                 else if (phydev->speed == SPEED_1000 ||
1928                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1929                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1930                 else
1931                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1932
1933                 if (phydev->duplex == DUPLEX_HALF)
1934                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1935                 else {
1936                         lcl_adv = mii_advertise_flowctrl(
1937                                   tp->link_config.flowctrl);
1938
1939                         if (phydev->pause)
1940                                 rmt_adv = LPA_PAUSE_CAP;
1941                         if (phydev->asym_pause)
1942                                 rmt_adv |= LPA_PAUSE_ASYM;
1943                 }
1944
1945                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1946         } else
1947                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1948
1949         if (mac_mode != tp->mac_mode) {
1950                 tp->mac_mode = mac_mode;
1951                 tw32_f(MAC_MODE, tp->mac_mode);
1952                 udelay(40);
1953         }
1954
1955         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1956                 if (phydev->speed == SPEED_10)
1957                         tw32(MAC_MI_STAT,
1958                              MAC_MI_STAT_10MBPS_MODE |
1959                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1960                 else
1961                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1962         }
1963
1964         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1965                 tw32(MAC_TX_LENGTHS,
1966                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1967                       (6 << TX_LENGTHS_IPG_SHIFT) |
1968                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1969         else
1970                 tw32(MAC_TX_LENGTHS,
1971                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1972                       (6 << TX_LENGTHS_IPG_SHIFT) |
1973                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1974
1975         if (phydev->link != tp->old_link ||
1976             phydev->speed != tp->link_config.active_speed ||
1977             phydev->duplex != tp->link_config.active_duplex ||
1978             oldflowctrl != tp->link_config.active_flowctrl)
1979                 linkmesg = 1;
1980
1981         tp->old_link = phydev->link;
1982         tp->link_config.active_speed = phydev->speed;
1983         tp->link_config.active_duplex = phydev->duplex;
1984
1985         spin_unlock_bh(&tp->lock);
1986
1987         if (linkmesg)
1988                 tg3_link_report(tp);
1989 }
1990
1991 static int tg3_phy_init(struct tg3 *tp)
1992 {
1993         struct phy_device *phydev;
1994
1995         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1996                 return 0;
1997
1998         /* Bring the PHY back to a known state. */
1999         tg3_bmcr_reset(tp);
2000
2001         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2002
2003         /* Attach the MAC to the PHY. */
2004         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
2005                              phydev->dev_flags, phydev->interface);
2006         if (IS_ERR(phydev)) {
2007                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2008                 return PTR_ERR(phydev);
2009         }
2010
2011         /* Mask with MAC supported features. */
2012         switch (phydev->interface) {
2013         case PHY_INTERFACE_MODE_GMII:
2014         case PHY_INTERFACE_MODE_RGMII:
2015                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2016                         phydev->supported &= (PHY_GBIT_FEATURES |
2017                                               SUPPORTED_Pause |
2018                                               SUPPORTED_Asym_Pause);
2019                         break;
2020                 }
2021                 /* fallthru */
2022         case PHY_INTERFACE_MODE_MII:
2023                 phydev->supported &= (PHY_BASIC_FEATURES |
2024                                       SUPPORTED_Pause |
2025                                       SUPPORTED_Asym_Pause);
2026                 break;
2027         default:
2028                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2029                 return -EINVAL;
2030         }
2031
2032         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2033
2034         phydev->advertising = phydev->supported;
2035
2036         return 0;
2037 }
2038
2039 static void tg3_phy_start(struct tg3 *tp)
2040 {
2041         struct phy_device *phydev;
2042
2043         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2044                 return;
2045
2046         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2047
2048         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2049                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2050                 phydev->speed = tp->link_config.speed;
2051                 phydev->duplex = tp->link_config.duplex;
2052                 phydev->autoneg = tp->link_config.autoneg;
2053                 phydev->advertising = tp->link_config.advertising;
2054         }
2055
2056         phy_start(phydev);
2057
2058         phy_start_aneg(phydev);
2059 }
2060
2061 static void tg3_phy_stop(struct tg3 *tp)
2062 {
2063         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2064                 return;
2065
2066         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2067 }
2068
2069 static void tg3_phy_fini(struct tg3 *tp)
2070 {
2071         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2072                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2073                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2074         }
2075 }
2076
2077 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2078 {
2079         int err;
2080         u32 val;
2081
2082         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2083                 return 0;
2084
2085         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2086                 /* Cannot do read-modify-write on 5401 */
2087                 err = tg3_phy_auxctl_write(tp,
2088                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2089                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2090                                            0x4c20);
2091                 goto done;
2092         }
2093
2094         err = tg3_phy_auxctl_read(tp,
2095                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2096         if (err)
2097                 return err;
2098
2099         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2100         err = tg3_phy_auxctl_write(tp,
2101                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2102
2103 done:
2104         return err;
2105 }
2106
2107 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2108 {
2109         u32 phytest;
2110
2111         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2112                 u32 phy;
2113
2114                 tg3_writephy(tp, MII_TG3_FET_TEST,
2115                              phytest | MII_TG3_FET_SHADOW_EN);
2116                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2117                         if (enable)
2118                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2119                         else
2120                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2121                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2122                 }
2123                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2124         }
2125 }
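The FET APD toggle above is a guarded read-modify-write: expose the shadow page, flip exactly one bit, then restore the page register so the PHY looks untouched to other code. A userspace mock of the pattern over a fake register file (register addresses and bit values here are hypothetical):

#include <stdio.h>
#include <stdint.h>

#define REG_TEST	0x1f	/* hypothetical page/test register */
#define REG_AUXSTAT2	0x1b	/* hypothetical shadow register */
#define SHADOW_EN	0x0080
#define APD_BIT		0x0020

static uint16_t regs[32];	/* fake PHY register file */

static void toggle_apd(int enable)
{
	uint16_t saved = regs[REG_TEST];

	regs[REG_TEST] = saved | SHADOW_EN;	/* expose the shadow page */
	if (enable)
		regs[REG_AUXSTAT2] |= APD_BIT;	/* touch only the APD bit */
	else
		regs[REG_AUXSTAT2] &= ~APD_BIT;
	regs[REG_TEST] = saved;			/* restore the page */
}

int main(void)
{
	toggle_apd(1);
	printf("auxstat2 = 0x%04x\n", (unsigned int)regs[REG_AUXSTAT2]); /* 0x0020 */
	toggle_apd(0);
	printf("auxstat2 = 0x%04x\n", (unsigned int)regs[REG_AUXSTAT2]); /* 0x0000 */
	return 0;
}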
2126
2127 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2128 {
2129         u32 reg;
2130
2131         if (!tg3_flag(tp, 5705_PLUS) ||
2132             (tg3_flag(tp, 5717_PLUS) &&
2133              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2134                 return;
2135
2136         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2137                 tg3_phy_fet_toggle_apd(tp, enable);
2138                 return;
2139         }
2140
2141         reg = MII_TG3_MISC_SHDW_WREN |
2142               MII_TG3_MISC_SHDW_SCR5_SEL |
2143               MII_TG3_MISC_SHDW_SCR5_LPED |
2144               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2145               MII_TG3_MISC_SHDW_SCR5_SDTL |
2146               MII_TG3_MISC_SHDW_SCR5_C125OE;
2147         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2148                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2149
2150         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2151
2152
2153         reg = MII_TG3_MISC_SHDW_WREN |
2154               MII_TG3_MISC_SHDW_APD_SEL |
2155               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2156         if (enable)
2157                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2158
2159         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2160 }
2161
2162 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2163 {
2164         u32 phy;
2165
2166         if (!tg3_flag(tp, 5705_PLUS) ||
2167             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2168                 return;
2169
2170         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2171                 u32 ephy;
2172
2173                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2174                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2175
2176                         tg3_writephy(tp, MII_TG3_FET_TEST,
2177                                      ephy | MII_TG3_FET_SHADOW_EN);
2178                         if (!tg3_readphy(tp, reg, &phy)) {
2179                                 if (enable)
2180                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2181                                 else
2182                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2183                                 tg3_writephy(tp, reg, phy);
2184                         }
2185                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2186                 }
2187         } else {
2188                 int ret;
2189
2190                 ret = tg3_phy_auxctl_read(tp,
2191                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2192                 if (!ret) {
2193                         if (enable)
2194                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2195                         else
2196                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2197                         tg3_phy_auxctl_write(tp,
2198                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2199                 }
2200         }
2201 }
2202
2203 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2204 {
2205         int ret;
2206         u32 val;
2207
2208         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2209                 return;
2210
2211         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2212         if (!ret)
2213                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2214                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2215 }
2216
2217 static void tg3_phy_apply_otp(struct tg3 *tp)
2218 {
2219         u32 otp, phy;
2220
2221         if (!tp->phy_otp)
2222                 return;
2223
2224         otp = tp->phy_otp;
2225
2226         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2227                 return;
2228
2229         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2230         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2231         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2232
2233         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2234               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2235         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2236
2237         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2238         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2239         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2240
2241         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2242         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2243
2244         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2245         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2246
2247         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2248               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2249         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2250
2251         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2252 }
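Every OTP field above is recovered with the same mask-then-shift idiom before being merged with a default and written to a DSP register. A trivial standalone demo with a made-up mask and shift (the real TG3_OTP_* constants are in tg3.h):

#include <stdio.h>
#include <stdint.h>

#define FIELD_MASK	0x00ff0000u	/* hypothetical OTP field mask */
#define FIELD_SHIFT	16

int main(void)
{
	uint32_t otp = 0x12ab34cdu;
	uint32_t field = (otp & FIELD_MASK) >> FIELD_SHIFT;

	printf("field = 0x%02x\n", (unsigned int)field);	/* 0xab */
	return 0;
}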
2253
2254 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2255 {
2256         u32 val;
2257
2258         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2259                 return;
2260
2261         tp->setlpicnt = 0;
2262
2263         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2264             current_link_up == 1 &&
2265             tp->link_config.active_duplex == DUPLEX_FULL &&
2266             (tp->link_config.active_speed == SPEED_100 ||
2267              tp->link_config.active_speed == SPEED_1000)) {
2268                 u32 eeectl;
2269
2270                 if (tp->link_config.active_speed == SPEED_1000)
2271                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2272                 else
2273                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2274
2275                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2276
2277                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2278                                   TG3_CL45_D7_EEERES_STAT, &val);
2279
2280                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2281                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2282                         tp->setlpicnt = 2;
2283         }
2284
2285         if (!tp->setlpicnt) {
2286                 if (current_link_up == 1 &&
2287                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2288                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2289                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2290                 }
2291
2292                 val = tr32(TG3_CPMU_EEE_MODE);
2293                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2294         }
2295 }
2296
2297 static void tg3_phy_eee_enable(struct tg3 *tp)
2298 {
2299         u32 val;
2300
2301         if (tp->link_config.active_speed == SPEED_1000 &&
2302             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2303              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2304              tg3_flag(tp, 57765_CLASS)) &&
2305             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2306                 val = MII_TG3_DSP_TAP26_ALNOKO |
2307                       MII_TG3_DSP_TAP26_RMRXSTO;
2308                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2309                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2310         }
2311
2312         val = tr32(TG3_CPMU_EEE_MODE);
2313         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2314 }
2315
2316 static int tg3_wait_macro_done(struct tg3 *tp)
2317 {
2318         int limit = 100;
2319
2320         while (limit--) {
2321                 u32 tmp32;
2322
2323                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2324                         if ((tmp32 & 0x1000) == 0)
2325                                 break;
2326                 }
2327         }
2328         if (limit < 0)
2329                 return -EBUSY;
2330
2331         return 0;
2332 }
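The loop above leans on the post-decrement: an early break leaves limit at zero or above, and only a full timeout drives it to -1, which is what the follow-up test detects. A quick standalone check of both paths:

#include <stdio.h>

static int wait_done(int succeed_at)
{
	int limit = 100;

	while (limit--) {
		if (succeed_at-- == 0)
			break;		/* condition met in time */
	}
	if (limit < 0)
		return -1;		/* all 100 polls exhausted */
	return 0;
}

int main(void)
{
	printf("success case: %d\n", wait_done(5));	/* prints 0 */
	printf("timeout case: %d\n", wait_done(1000));	/* prints -1 */
	return 0;
}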
2333
2334 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2335 {
2336         static const u32 test_pat[4][6] = {
2337         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2338         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2339         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2340         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2341         };
2342         int chan;
2343
2344         for (chan = 0; chan < 4; chan++) {
2345                 int i;
2346
2347                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2348                              (chan * 0x2000) | 0x0200);
2349                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2350
2351                 for (i = 0; i < 6; i++)
2352                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2353                                      test_pat[chan][i]);
2354
2355                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2356                 if (tg3_wait_macro_done(tp)) {
2357                         *resetp = 1;
2358                         return -EBUSY;
2359                 }
2360
2361                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2362                              (chan * 0x2000) | 0x0200);
2363                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2364                 if (tg3_wait_macro_done(tp)) {
2365                         *resetp = 1;
2366                         return -EBUSY;
2367                 }
2368
2369                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2370                 if (tg3_wait_macro_done(tp)) {
2371                         *resetp = 1;
2372                         return -EBUSY;
2373                 }
2374
2375                 for (i = 0; i < 6; i += 2) {
2376                         u32 low, high;
2377
2378                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2379                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2380                             tg3_wait_macro_done(tp)) {
2381                                 *resetp = 1;
2382                                 return -EBUSY;
2383                         }
2384                         low &= 0x7fff;
2385                         high &= 0x000f;
2386                         if (low != test_pat[chan][i] ||
2387                             high != test_pat[chan][i+1]) {
2388                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2389                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2390                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2391
2392                                 return -EBUSY;
2393                         }
2394                 }
2395         }
2396
2397         return 0;
2398 }
2399
2400 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2401 {
2402         int chan;
2403
2404         for (chan = 0; chan < 4; chan++) {
2405                 int i;
2406
2407                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2408                              (chan * 0x2000) | 0x0200);
2409                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2410                 for (i = 0; i < 6; i++)
2411                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2412                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2413                 if (tg3_wait_macro_done(tp))
2414                         return -EBUSY;
2415         }
2416
2417         return 0;
2418 }
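Both the test-pattern writer and the channel reset above address the DSP channels through windows 0x2000 apart at offset 0x0200. A one-line helper spells out the addressing (illustrative only):

#include <stdio.h>

static unsigned int chan_addr(int chan)
{
	return (chan * 0x2000) | 0x0200;
}

int main(void)
{
	for (int chan = 0; chan < 4; chan++)
		printf("chan %d -> 0x%04x\n", chan, chan_addr(chan));
	/* 0x0200, 0x2200, 0x4200, 0x6200 */
	return 0;
}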
2419
2420 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2421 {
2422         u32 reg32, phy9_orig;
2423         int retries, do_phy_reset, err;
2424
2425         retries = 10;
2426         do_phy_reset = 1;
2427         do {
2428                 if (do_phy_reset) {
2429                         err = tg3_bmcr_reset(tp);
2430                         if (err)
2431                                 return err;
2432                         do_phy_reset = 0;
2433                 }
2434
2435                 /* Disable transmitter and interrupt.  */
2436                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2437                         continue;
2438
2439                 reg32 |= 0x3000;
2440                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2441
2442                 /* Set full-duplex, 1000 Mbps.  */
2443                 tg3_writephy(tp, MII_BMCR,
2444                              BMCR_FULLDPLX | BMCR_SPEED1000);
2445
2446                 /* Set to master mode.  */
2447                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2448                         continue;
2449
2450                 tg3_writephy(tp, MII_CTRL1000,
2451                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2452
2453                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2454                 if (err)
2455                         return err;
2456
2457                 /* Block the PHY control access.  */
2458                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2459
2460                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2461                 if (!err)
2462                         break;
2463         } while (--retries);
2464
2465         err = tg3_phy_reset_chanpat(tp);
2466         if (err)
2467                 return err;
2468
2469         tg3_phydsp_write(tp, 0x8005, 0x0000);
2470
2471         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2472         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2473
2474         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2475
2476         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2477
2478         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2479                 reg32 &= ~0x3000;
2480                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2481         } else if (!err)
2482                 err = -EBUSY;
2483
2484         return err;
2485 }
2486
2487 static void tg3_carrier_on(struct tg3 *tp)
2488 {
2489         netif_carrier_on(tp->dev);
2490         tp->link_up = true;
2491 }
2492
2493 static void tg3_carrier_off(struct tg3 *tp)
2494 {
2495         netif_carrier_off(tp->dev);
2496         tp->link_up = false;
2497 }
2498
2499 /* This will reset the tigon3 PHY unconditionally and reapply the
2500  * chip-specific workarounds needed before the link can come back up.
2501  */
2502 static int tg3_phy_reset(struct tg3 *tp)
2503 {
2504         u32 val, cpmuctrl;
2505         int err;
2506
2507         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2508                 val = tr32(GRC_MISC_CFG);
2509                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2510                 udelay(40);
2511         }
2512         err  = tg3_readphy(tp, MII_BMSR, &val);
2513         err |= tg3_readphy(tp, MII_BMSR, &val);
2514         if (err != 0)
2515                 return -EBUSY;
2516
2517         if (netif_running(tp->dev) && tp->link_up) {
2518                 tg3_carrier_off(tp);
2519                 tg3_link_report(tp);
2520         }
2521
2522         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2523             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2524             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2525                 err = tg3_phy_reset_5703_4_5(tp);
2526                 if (err)
2527                         return err;
2528                 goto out;
2529         }
2530
2531         cpmuctrl = 0;
2532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2533             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2534                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2535                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2536                         tw32(TG3_CPMU_CTRL,
2537                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2538         }
2539
2540         err = tg3_bmcr_reset(tp);
2541         if (err)
2542                 return err;
2543
2544         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2545                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2546                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2547
2548                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2549         }
2550
2551         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2552             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2553                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2554                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2555                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2556                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2557                         udelay(40);
2558                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2559                 }
2560         }
2561
2562         if (tg3_flag(tp, 5717_PLUS) &&
2563             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2564                 return 0;
2565
2566         tg3_phy_apply_otp(tp);
2567
2568         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2569                 tg3_phy_toggle_apd(tp, true);
2570         else
2571                 tg3_phy_toggle_apd(tp, false);
2572
2573 out:
2574         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2575             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2576                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2577                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2578                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2579         }
2580
2581         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2582                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2583                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2584         }
2585
2586         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2587                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2588                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2589                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2590                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2591                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2592                 }
2593         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2594                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2595                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2596                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2597                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2598                                 tg3_writephy(tp, MII_TG3_TEST1,
2599                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2600                         } else
2601                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2602
2603                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2604                 }
2605         }
2606
2607         /* Set Extended packet length bit (bit 14) on all chips
2608          * that support jumbo frames. */
2609         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2610                 /* Cannot do read-modify-write on 5401 */
2611                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2612         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2613                 /* Set bit 14 with read-modify-write to preserve other bits */
2614                 err = tg3_phy_auxctl_read(tp,
2615                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2616                 if (!err)
2617                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2618                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2619         }
2620
2621         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2622          * jumbo frame transmission.
2623          */
2624         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2625                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2626                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2627                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2628         }
2629
2630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2631                 /* adjust output voltage */
2632                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2633         }
2634
2635         tg3_phy_toggle_automdix(tp, 1);
2636         tg3_phy_set_wirespeed(tp);
2637         return 0;
2638 }
2639
2640 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2641 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2642 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2643                                           TG3_GPIO_MSG_NEED_VAUX)
2644 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2645         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2646          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2647          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2648          (TG3_GPIO_MSG_DRVR_PRES << 12))
2649
2650 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2651         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2652          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2653          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2654          (TG3_GPIO_MSG_NEED_VAUX << 12))
2655
2656 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2657 {
2658         u32 status, shift;
2659
2660         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2662                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2663         else
2664                 status = tr32(TG3_CPMU_DRV_STATUS);
2665
2666         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2667         status &= ~(TG3_GPIO_MSG_MASK << shift);
2668         status |= (newstat << shift);
2669
2670         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2671             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2672                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2673         else
2674                 tw32(TG3_CPMU_DRV_STATUS, status);
2675
2676         return status >> TG3_APE_GPIO_MSG_SHIFT;
2677 }
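Each PCI function owns a 4-bit slice of the shared status word, so the update above masks out its own slice and ORs in the new state. A simplified standalone version (it drops the TG3_APE_GPIO_MSG_SHIFT offset the real code applies):

#include <stdio.h>
#include <stdint.h>

#define MSG_MASK 0x3u	/* DRVR_PRES | NEED_VAUX, per function */

static uint32_t set_fn_status(uint32_t status, int pci_fn, uint32_t newstat)
{
	unsigned int shift = 4 * pci_fn;

	status &= ~(MSG_MASK << shift);	/* clear this function's slice */
	status |= newstat << shift;	/* install its new state */
	return status;
}

int main(void)
{
	uint32_t status = 0;

	status = set_fn_status(status, 0, 0x1);	/* fn0: driver present */
	status = set_fn_status(status, 2, 0x3);	/* fn2: present + needs vaux */
	printf("status = 0x%04x\n", (unsigned int)status);	/* 0x0301 */
	return 0;
}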
2678
2679 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2680 {
2681         if (!tg3_flag(tp, IS_NIC))
2682                 return 0;
2683
2684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2685             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2686             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2687                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2688                         return -EIO;
2689
2690                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2691
2692                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2693                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2694
2695                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2696         } else {
2697                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2698                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2699         }
2700
2701         return 0;
2702 }
2703
2704 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2705 {
2706         u32 grc_local_ctrl;
2707
2708         if (!tg3_flag(tp, IS_NIC) ||
2709             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2710             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2711                 return;
2712
2713         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2714
2715         tw32_wait_f(GRC_LOCAL_CTRL,
2716                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2717                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2718
2719         tw32_wait_f(GRC_LOCAL_CTRL,
2720                     grc_local_ctrl,
2721                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2722
2723         tw32_wait_f(GRC_LOCAL_CTRL,
2724                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2725                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2726 }
2727
2728 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2729 {
2730         if (!tg3_flag(tp, IS_NIC))
2731                 return;
2732
2733         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2734             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2735                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2736                             (GRC_LCLCTRL_GPIO_OE0 |
2737                              GRC_LCLCTRL_GPIO_OE1 |
2738                              GRC_LCLCTRL_GPIO_OE2 |
2739                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2740                              GRC_LCLCTRL_GPIO_OUTPUT1),
2741                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2742         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2743                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2744                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2745                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2746                                      GRC_LCLCTRL_GPIO_OE1 |
2747                                      GRC_LCLCTRL_GPIO_OE2 |
2748                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2749                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2750                                      tp->grc_local_ctrl;
2751                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2752                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2753
2754                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2755                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2756                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2757
2758                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2759                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2760                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2761         } else {
2762                 u32 no_gpio2;
2763                 u32 grc_local_ctrl = 0;
2764
2765                 /* Workaround to prevent overdrawing Amps. */
2766                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2767                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2768                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2769                                     grc_local_ctrl,
2770                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2771                 }
2772
2773                 /* On 5753 and variants, GPIO2 cannot be used. */
2774                 no_gpio2 = tp->nic_sram_data_cfg &
2775                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2776
2777                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2778                                   GRC_LCLCTRL_GPIO_OE1 |
2779                                   GRC_LCLCTRL_GPIO_OE2 |
2780                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2781                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2782                 if (no_gpio2) {
2783                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2784                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2785                 }
2786                 tw32_wait_f(GRC_LOCAL_CTRL,
2787                             tp->grc_local_ctrl | grc_local_ctrl,
2788                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2789
2790                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2791
2792                 tw32_wait_f(GRC_LOCAL_CTRL,
2793                             tp->grc_local_ctrl | grc_local_ctrl,
2794                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2795
2796                 if (!no_gpio2) {
2797                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2798                         tw32_wait_f(GRC_LOCAL_CTRL,
2799                                     tp->grc_local_ctrl | grc_local_ctrl,
2800                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2801                 }
2802         }
2803 }
2804
2805 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2806 {
2807         u32 msg = 0;
2808
2809         /* Serialize power state transitions */
2810         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2811                 return;
2812
2813         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2814                 msg = TG3_GPIO_MSG_NEED_VAUX;
2815
2816         msg = tg3_set_function_status(tp, msg);
2817
2818         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2819                 goto done;
2820
2821         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2822                 tg3_pwrsrc_switch_to_vaux(tp);
2823         else
2824                 tg3_pwrsrc_die_with_vmain(tp);
2825
2826 done:
2827         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2828 }
2829
2830 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2831 {
2832         bool need_vaux = false;
2833
2834         /* The GPIOs do something completely different on 57765. */
2835         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2836                 return;
2837
2838         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2839             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2840             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2841                 tg3_frob_aux_power_5717(tp, include_wol ?
2842                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2843                 return;
2844         }
2845
2846         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2847                 struct net_device *dev_peer;
2848
2849                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2850
2851                 /* remove_one() may have been run on the peer. */
2852                 if (dev_peer) {
2853                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2854
2855                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2856                                 return;
2857
2858                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2859                             tg3_flag(tp_peer, ENABLE_ASF))
2860                                 need_vaux = true;
2861                 }
2862         }
2863
2864         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2865             tg3_flag(tp, ENABLE_ASF))
2866                 need_vaux = true;
2867
2868         if (need_vaux)
2869                 tg3_pwrsrc_switch_to_vaux(tp);
2870         else
2871                 tg3_pwrsrc_die_with_vmain(tp);
2872 }
2873
2874 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2875 {
2876         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2877                 return 1;
2878         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2879                 if (speed != SPEED_10)
2880                         return 1;
2881         } else if (speed == SPEED_10)
2882                 return 1;
2883
2884         return 0;
2885 }
2886
2887 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2888 {
2889         u32 val;
2890
2891         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2892                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2893                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2894                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2895
2896                         sg_dig_ctrl |=
2897                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2898                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2899                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2900                 }
2901                 return;
2902         }
2903
2904         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2905                 tg3_bmcr_reset(tp);
2906                 val = tr32(GRC_MISC_CFG);
2907                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2908                 udelay(40);
2909                 return;
2910         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2911                 u32 phytest;
2912                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2913                         u32 phy;
2914
2915                         tg3_writephy(tp, MII_ADVERTISE, 0);
2916                         tg3_writephy(tp, MII_BMCR,
2917                                      BMCR_ANENABLE | BMCR_ANRESTART);
2918
2919                         tg3_writephy(tp, MII_TG3_FET_TEST,
2920                                      phytest | MII_TG3_FET_SHADOW_EN);
2921                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2922                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2923                                 tg3_writephy(tp,
2924                                              MII_TG3_FET_SHDW_AUXMODE4,
2925                                              phy);
2926                         }
2927                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2928                 }
2929                 return;
2930         } else if (do_low_power) {
2931                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2932                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2933
2934                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2935                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2936                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2937                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2938         }
2939
2940         /* On some chips the PHY must not be powered down at all
2941          * because of hardware bugs.
2942          */
2943         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2944             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2945             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2946              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2947             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2948              !tp->pci_fn))
2949                 return;
2950
2951         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2952             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2953                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2954                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2955                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2956                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2957         }
2958
2959         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2960 }
2961
2962 /* tp->lock is held. */
2963 static int tg3_nvram_lock(struct tg3 *tp)
2964 {
2965         if (tg3_flag(tp, NVRAM)) {
2966                 int i;
2967
2968                 if (tp->nvram_lock_cnt == 0) {
2969                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2970                         for (i = 0; i < 8000; i++) {
2971                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2972                                         break;
2973                                 udelay(20);
2974                         }
2975                         if (i == 8000) {
2976                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2977                                 return -ENODEV;
2978                         }
2979                 }
2980                 tp->nvram_lock_cnt++;
2981         }
2982         return 0;
2983 }
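/* Note: the NVRAM lock nests.  Only the outermost tg3_nvram_lock() call
 * actually requests SWARB_REQ_SET1, polling for the grant for up to
 * about 8000 * 20us = 160ms before failing with -ENODEV, and
 * tg3_nvram_unlock() below releases the arbiter only once the nesting
 * count drops back to zero.
 */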
2984
2985 /* tp->lock is held. */
2986 static void tg3_nvram_unlock(struct tg3 *tp)
2987 {
2988         if (tg3_flag(tp, NVRAM)) {
2989                 if (tp->nvram_lock_cnt > 0)
2990                         tp->nvram_lock_cnt--;
2991                 if (tp->nvram_lock_cnt == 0)
2992                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2993         }
2994 }
2995
2996 /* tp->lock is held. */
2997 static void tg3_enable_nvram_access(struct tg3 *tp)
2998 {
2999         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3000                 u32 nvaccess = tr32(NVRAM_ACCESS);
3001
3002                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3003         }
3004 }
3005
3006 /* tp->lock is held. */
3007 static void tg3_disable_nvram_access(struct tg3 *tp)
3008 {
3009         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3010                 u32 nvaccess = tr32(NVRAM_ACCESS);
3011
3012                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3013         }
3014 }
3015
3016 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3017                                         u32 offset, u32 *val)
3018 {
3019         u32 tmp;
3020         int i;
3021
3022         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3023                 return -EINVAL;
3024
3025         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3026                                         EEPROM_ADDR_DEVID_MASK |
3027                                         EEPROM_ADDR_READ);
3028         tw32(GRC_EEPROM_ADDR,
3029              tmp |
3030              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3031              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3032               EEPROM_ADDR_ADDR_MASK) |
3033              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3034
3035         for (i = 0; i < 1000; i++) {
3036                 tmp = tr32(GRC_EEPROM_ADDR);
3037
3038                 if (tmp & EEPROM_ADDR_COMPLETE)
3039                         break;
3040                 msleep(1);
3041         }
3042         if (!(tmp & EEPROM_ADDR_COMPLETE))
3043                 return -EBUSY;
3044
3045         tmp = tr32(GRC_EEPROM_DATA);
3046
3047         /*
3048          * The data will always be opposite the native endian
3049          * format.  Perform a blind byteswap to compensate.
3050          */
3051         *val = swab32(tmp);
3052
3053         return 0;
3054 }
3055
3056 #define NVRAM_CMD_TIMEOUT 10000
3057
3058 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3059 {
3060         int i;
3061
3062         tw32(NVRAM_CMD, nvram_cmd);
3063         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3064                 udelay(10);
3065                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3066                         udelay(10);
3067                         break;
3068                 }
3069         }
3070
3071         if (i == NVRAM_CMD_TIMEOUT)
3072                 return -EBUSY;
3073
3074         return 0;
3075 }
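/* tg3_nvram_exec_cmd() polls for NVRAM_CMD_DONE for at most
 * NVRAM_CMD_TIMEOUT * 10us = 100ms.  A minimal illustrative use,
 * mirroring the read path in tg3_nvram_read() further down:
 *
 *	tw32(NVRAM_ADDR, offset);
 *	err = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
 *			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
 *	if (!err)
 *		val = tr32(NVRAM_RDDATA);
 */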
3076
3077 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3078 {
3079         if (tg3_flag(tp, NVRAM) &&
3080             tg3_flag(tp, NVRAM_BUFFERED) &&
3081             tg3_flag(tp, FLASH) &&
3082             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3083             (tp->nvram_jedecnum == JEDEC_ATMEL))
3084
3085                 addr = ((addr / tp->nvram_pagesize) <<
3086                         ATMEL_AT45DB0X1B_PAGE_POS) +
3087                        (addr % tp->nvram_pagesize);
3088
3089         return addr;
3090 }
3091
3092 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3093 {
3094         if (tg3_flag(tp, NVRAM) &&
3095             tg3_flag(tp, NVRAM_BUFFERED) &&
3096             tg3_flag(tp, FLASH) &&
3097             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3098             (tp->nvram_jedecnum == JEDEC_ATMEL))
3099
3100                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3101                         tp->nvram_pagesize) +
3102                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3103
3104         return addr;
3105 }
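/* Worked example of the Atmel address translation above (a sketch,
 * assuming the 264-byte pages and the 9-bit ATMEL_AT45DB0X1B_PAGE_POS
 * shift these parts use):
 *
 *	linear addr 1000 -> page 1000 / 264 = 3, offset 1000 % 264 = 208
 *	physical addr    =  (3 << 9) + 208 = 1744
 *
 * tg3_nvram_logical_addr() inverts it:
 *	(1744 >> 9) * 264 + (1744 & 511) = 3 * 264 + 208 = 1000
 */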
3106
3107 /* NOTE: Data read in from NVRAM is byteswapped according to
3108  * the byteswapping settings for all other register accesses.
3109  * tg3 devices are BE devices, so on a BE machine, the data
3110  * returned will be exactly as it is seen in NVRAM.  On a LE
3111  * machine, the 32-bit value will be byteswapped.
3112  */
3113 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3114 {
3115         int ret;
3116
3117         if (!tg3_flag(tp, NVRAM))
3118                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3119
3120         offset = tg3_nvram_phys_addr(tp, offset);
3121
3122         if (offset > NVRAM_ADDR_MSK)
3123                 return -EINVAL;
3124
3125         ret = tg3_nvram_lock(tp);
3126         if (ret)
3127                 return ret;
3128
3129         tg3_enable_nvram_access(tp);
3130
3131         tw32(NVRAM_ADDR, offset);
3132         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3133                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3134
3135         if (ret == 0)
3136                 *val = tr32(NVRAM_RDDATA);
3137
3138         tg3_disable_nvram_access(tp);
3139
3140         tg3_nvram_unlock(tp);
3141
3142         return ret;
3143 }
3144
3145 /* Ensures NVRAM data is in bytestream format. */
3146 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3147 {
3148         u32 v;
3149         int res = tg3_nvram_read(tp, offset, &v);
3150         if (!res)
3151                 *val = cpu_to_be32(v);
3152         return res;
3153 }
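/* Illustrative caller (a sketch, not code from this driver): filling a
 * byte buffer with NVRAM contents independent of host endianness.
 *
 *	__be32 buf[4];
 *	int i, err = 0;
 *
 *	for (i = 0; i < 4 && !err; i++)
 *		err = tg3_nvram_read_be32(tp, offset + 4 * i, &buf[i]);
 */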
3154
3155 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3156                                     u32 offset, u32 len, u8 *buf)
3157 {
3158         int i, j, rc = 0;
3159         u32 val;
3160
3161         for (i = 0; i < len; i += 4) {
3162                 u32 addr;
3163                 __be32 data;
3164
3165                 addr = offset + i;
3166
3167                 memcpy(&data, buf + i, 4);
3168
3169                 /*
3170                  * The SEEPROM interface expects the data to always be opposite
3171                  * the native endian format.  We accomplish this by reversing
3172                  * all the operations that would have been performed on the
3173                  * data from a call to tg3_nvram_read_be32().
3174                  */
3175                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3176
3177                 val = tr32(GRC_EEPROM_ADDR);
3178                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3179
3180                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3181                         EEPROM_ADDR_READ);
3182                 tw32(GRC_EEPROM_ADDR, val |
3183                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3184                         (addr & EEPROM_ADDR_ADDR_MASK) |
3185                         EEPROM_ADDR_START |
3186                         EEPROM_ADDR_WRITE);
3187
3188                 for (j = 0; j < 1000; j++) {
3189                         val = tr32(GRC_EEPROM_ADDR);
3190
3191                         if (val & EEPROM_ADDR_COMPLETE)
3192                                 break;
3193                         msleep(1);
3194                 }
3195                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3196                         rc = -EBUSY;
3197                         break;
3198                 }
3199         }
3200
3201         return rc;
3202 }
3203
3204 /* offset and length are dword aligned */
3205 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3206                 u8 *buf)
3207 {
3208         int ret = 0;
3209         u32 pagesize = tp->nvram_pagesize;
3210         u32 pagemask = pagesize - 1;
3211         u32 nvram_cmd;
3212         u8 *tmp;
3213
3214         tmp = kmalloc(pagesize, GFP_KERNEL);
3215         if (tmp == NULL)
3216                 return -ENOMEM;
3217
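	/* Unbuffered flash parts can only be erased and rewritten one
	 * full page at a time, so each pass of the loop below is a
	 * read-modify-write cycle: read the whole page into tmp, merge
	 * in the caller's bytes, erase the page, then stream the page
	 * back out word by word with FIRST/LAST framing.
	 */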
3218         while (len) {
3219                 int j;
3220                 u32 phy_addr, page_off, size;
3221
3222                 phy_addr = offset & ~pagemask;
3223
3224                 for (j = 0; j < pagesize; j += 4) {
3225                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3226                                                   (__be32 *) (tmp + j));
3227                         if (ret)
3228                                 break;
3229                 }
3230                 if (ret)
3231                         break;
3232
3233                 page_off = offset & pagemask;
3234                 size = pagesize;
3235                 if (len < size)
3236                         size = len;
3237
3238                 len -= size;
3239
3240                 memcpy(tmp + page_off, buf, size);
3241
3242                 offset = offset + (pagesize - page_off);
3243
3244                 tg3_enable_nvram_access(tp);
3245
3246                 /*
3247                  * Before we can erase the flash page, we need
3248                  * to issue a special "write enable" command.
3249                  */
3250                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3251
3252                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3253                         break;
3254
3255                 /* Erase the target page */
3256                 tw32(NVRAM_ADDR, phy_addr);
3257
3258                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3259                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3260
3261                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3262                         break;
3263
3264                 /* Issue another write enable to start the write. */
3265                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3266
3267                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3268                         break;
3269
3270                 for (j = 0; j < pagesize; j += 4) {
3271                         __be32 data;
3272
3273                         data = *((__be32 *) (tmp + j));
3274
3275                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3276
3277                         tw32(NVRAM_ADDR, phy_addr + j);
3278
3279                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3280                                 NVRAM_CMD_WR;
3281
3282                         if (j == 0)
3283                                 nvram_cmd |= NVRAM_CMD_FIRST;
3284                         else if (j == (pagesize - 4))
3285                                 nvram_cmd |= NVRAM_CMD_LAST;
3286
3287                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3288                         if (ret)
3289                                 break;
3290                 }
3291                 if (ret)
3292                         break;
3293         }
3294
3295         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3296         tg3_nvram_exec_cmd(tp, nvram_cmd);
3297
3298         kfree(tmp);
3299
3300         return ret;
3301 }
3302
3303 /* offset and length are dword aligned */
3304 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3305                 u8 *buf)
3306 {
3307         int i, ret = 0;
3308
3309         for (i = 0; i < len; i += 4, offset += 4) {
3310                 u32 page_off, phy_addr, nvram_cmd;
3311                 __be32 data;
3312
3313                 memcpy(&data, buf + i, 4);
3314                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3315
3316                 page_off = offset % tp->nvram_pagesize;
3317
3318                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3319
3320                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3321
3322                 if (page_off == 0 || i == 0)
3323                         nvram_cmd |= NVRAM_CMD_FIRST;
3324                 if (page_off == (tp->nvram_pagesize - 4))
3325                         nvram_cmd |= NVRAM_CMD_LAST;
3326
3327                 if (i == (len - 4))
3328                         nvram_cmd |= NVRAM_CMD_LAST;
3329
3330                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3331                     !tg3_flag(tp, FLASH) ||
3332                     !tg3_flag(tp, 57765_PLUS))
3333                         tw32(NVRAM_ADDR, phy_addr);
3334
3335                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3336                     !tg3_flag(tp, 5755_PLUS) &&
3337                     (tp->nvram_jedecnum == JEDEC_ST) &&
3338                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3339                         u32 cmd;
3340
3341                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3342                         ret = tg3_nvram_exec_cmd(tp, cmd);
3343                         if (ret)
3344                                 break;
3345                 }
3346                 if (!tg3_flag(tp, FLASH)) {
3347                         /* We always do complete word writes to the EEPROM. */
3348                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3349                 }
3350
3351                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3352                 if (ret)
3353                         break;
3354         }
3355         return ret;
3356 }
3357
3358 /* offset and length are dword aligned */
3359 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3360 {
3361         int ret;
3362
3363         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3364                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3365                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3366                 udelay(40);
3367         }
3368
3369         if (!tg3_flag(tp, NVRAM)) {
3370                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3371         } else {
3372                 u32 grc_mode;
3373
3374                 ret = tg3_nvram_lock(tp);
3375                 if (ret)
3376                         return ret;
3377
3378                 tg3_enable_nvram_access(tp);
3379                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3380                         tw32(NVRAM_WRITE1, 0x406);
3381
3382                 grc_mode = tr32(GRC_MODE);
3383                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3384
3385                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3386                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3387                                 buf);
3388                 } else {
3389                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3390                                 buf);
3391                 }
3392
3393                 grc_mode = tr32(GRC_MODE);
3394                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3395
3396                 tg3_disable_nvram_access(tp);
3397                 tg3_nvram_unlock(tp);
3398         }
3399
3400         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3401                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3402                 udelay(40);
3403         }
3404
3405         return ret;
3406 }
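/* Illustrative caller (a sketch, not from this driver): offset and len
 * must be dword aligned, and buf holds bytestream (big-endian) data,
 * so rewriting a single word looks like:
 *
 *	__be32 word = cpu_to_be32(new_val);
 *
 *	err = tg3_nvram_write_block(tp, offset, 4, (u8 *)&word);
 */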
3407
3408 #define RX_CPU_SCRATCH_BASE     0x30000
3409 #define RX_CPU_SCRATCH_SIZE     0x04000
3410 #define TX_CPU_SCRATCH_BASE     0x34000
3411 #define TX_CPU_SCRATCH_SIZE     0x04000
3412
3413 /* tp->lock is held. */
3414 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3415 {
3416         int i;
3417
3418         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3419
3420         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3421                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3422
3423                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3424                 return 0;
3425         }
3426         if (offset == RX_CPU_BASE) {
3427                 for (i = 0; i < 10000; i++) {
3428                         tw32(offset + CPU_STATE, 0xffffffff);
3429                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3430                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3431                                 break;
3432                 }
3433
3434                 tw32(offset + CPU_STATE, 0xffffffff);
3435                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3436                 udelay(10);
3437         } else {
3438                 for (i = 0; i < 10000; i++) {
3439                         tw32(offset + CPU_STATE, 0xffffffff);
3440                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3441                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3442                                 break;
3443                 }
3444         }
3445
3446         if (i >= 10000) {
3447                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3448                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3449                 return -ENODEV;
3450         }
3451
3452         /* Clear firmware's nvram arbitration. */
3453         if (tg3_flag(tp, NVRAM))
3454                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3455         return 0;
3456 }
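/* The halt loops above simply retry the CPU_STATE/CPU_MODE writes up
 * to 10000 times until the halt bit sticks; the 5906 is special-cased
 * earlier because its VCPU is halted through GRC_VCPU_EXT_CTRL and
 * needs no polling.
 */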
3457
3458 struct fw_info {
3459         unsigned int fw_base;
3460         unsigned int fw_len;
3461         const __be32 *fw_data;
3462 };
3463
3464 /* tp->lock is held. */
3465 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3466                                  u32 cpu_scratch_base, int cpu_scratch_size,
3467                                  struct fw_info *info)
3468 {
3469         int err, lock_err, i;
3470         void (*write_op)(struct tg3 *, u32, u32);
3471
3472         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3473                 netdev_err(tp->dev,
3474                            "%s: attempted to load TX cpu firmware on a 5705 or newer chip\n",
3475                            __func__);
3476                 return -EINVAL;
3477         }
3478
3479         if (tg3_flag(tp, 5705_PLUS))
3480                 write_op = tg3_write_mem;
3481         else
3482                 write_op = tg3_write_indirect_reg32;
3483
3484         /* It is possible that bootcode is still loading at this point.
3485          * Get the nvram lock before halting the cpu.
3486          */
3487         lock_err = tg3_nvram_lock(tp);
3488         err = tg3_halt_cpu(tp, cpu_base);
3489         if (!lock_err)
3490                 tg3_nvram_unlock(tp);
3491         if (err)
3492                 goto out;
3493
3494         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3495                 write_op(tp, cpu_scratch_base + i, 0);
3496         tw32(cpu_base + CPU_STATE, 0xffffffff);
3497         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3498         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3499                 write_op(tp, (cpu_scratch_base +
3500                               (info->fw_base & 0xffff) +
3501                               (i * sizeof(u32))),
3502                               be32_to_cpu(info->fw_data[i]));
3503
3504         err = 0;
3505
3506 out:
3507         return err;
3508 }
3509
3510 /* tp->lock is held. */
3511 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3512 {
3513         struct fw_info info;
3514         const __be32 *fw_data;
3515         int err, i;
3516
3517         fw_data = (void *)tp->fw->data;
3518
3519         /* Firmware blob starts with version numbers, followed by
3520          * start address and length.  We are setting the complete length:
3521          * length = end_address_of_bss - start_address_of_text.
3522          * The remainder is the blob to be loaded contiguously
3523          * from the start address. */
3524
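	/* Concretely (a sketch of the header layout just described; word
	 * indices into fw_data, not values from any particular blob):
	 *
	 *	fw_data[0]	firmware version
	 *	fw_data[1]	start/load address (fw_base)
	 *	fw_data[2]	length recorded in the header
	 *	fw_data[3..]	image loaded contiguously from fw_base
	 *
	 * Note the driver derives the length from the file size
	 * (tp->fw->size - 12) rather than trusting the header word.
	 */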
3525         info.fw_base = be32_to_cpu(fw_data[1]);
3526         info.fw_len = tp->fw->size - 12;
3527         info.fw_data = &fw_data[3];
3528
3529         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3530                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3531                                     &info);
3532         if (err)
3533                 return err;
3534
3535         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3536                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3537                                     &info);
3538         if (err)
3539                 return err;
3540
3541         /* Now start up only the RX cpu. */
3542         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3543         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3544
3545         for (i = 0; i < 5; i++) {
3546                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3547                         break;
3548                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3549                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3550                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3551                 udelay(1000);
3552         }
3553         if (i >= 5) {
3554                 netdev_err(tp->dev, "%s failed to set RX CPU PC: got %08x, "
3555                            "expected %08x\n", __func__,
3556                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3557                 return -ENODEV;
3558         }
3559         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3560         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3561
3562         return 0;
3563 }
3564
3565 /* tp->lock is held. */
3566 static int tg3_load_tso_firmware(struct tg3 *tp)
3567 {
3568         struct fw_info info;
3569         const __be32 *fw_data;
3570         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3571         int err, i;
3572
3573         if (tg3_flag(tp, HW_TSO_1) ||
3574             tg3_flag(tp, HW_TSO_2) ||
3575             tg3_flag(tp, HW_TSO_3))
3576                 return 0;
3577
3578         fw_data = (void *)tp->fw->data;
3579
3580         /* Firmware blob starts with version numbers, followed by
3581          * start address and length.  We are setting the complete length:
3582          * length = end_address_of_bss - start_address_of_text.
3583          * The remainder is the blob to be loaded contiguously
3584          * from the start address. */
3585
3586         info.fw_base = be32_to_cpu(fw_data[1]);
3587         cpu_scratch_size = tp->fw_len;
3588         info.fw_len = tp->fw->size - 12;
3589         info.fw_data = &fw_data[3];
3590
3591         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3592                 cpu_base = RX_CPU_BASE;
3593                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3594         } else {
3595                 cpu_base = TX_CPU_BASE;
3596                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3597                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3598         }
3599
3600         err = tg3_load_firmware_cpu(tp, cpu_base,
3601                                     cpu_scratch_base, cpu_scratch_size,
3602                                     &info);
3603         if (err)
3604                 return err;
3605
3606         /* Now start up the cpu. */
3607         tw32(cpu_base + CPU_STATE, 0xffffffff);
3608         tw32_f(cpu_base + CPU_PC, info.fw_base);
3609
3610         for (i = 0; i < 5; i++) {
3611                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3612                         break;
3613                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3614                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3615                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3616                 udelay(1000);
3617         }
3618         if (i >= 5) {
3619                 netdev_err(tp->dev,
3620                            "%s failed to set CPU PC: got %08x, expected %08x\n",
3621                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3622                 return -ENODEV;
3623         }
3624         tw32(cpu_base + CPU_STATE, 0xffffffff);
3625         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3626         return 0;
3627 }
3628
3629
3630 /* tp->lock is held. */
3631 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3632 {
3633         u32 addr_high, addr_low;
3634         int i;
3635
3636         addr_high = ((tp->dev->dev_addr[0] << 8) |
3637                      tp->dev->dev_addr[1]);
3638         addr_low = ((tp->dev->dev_addr[2] << 24) |
3639                     (tp->dev->dev_addr[3] << 16) |
3640                     (tp->dev->dev_addr[4] <<  8) |
3641                     (tp->dev->dev_addr[5] <<  0));
3642         for (i = 0; i < 4; i++) {
3643                 if (i == 1 && skip_mac_1)
3644                         continue;
3645                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3646                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3647         }
3648
3649         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3650             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3651                 for (i = 0; i < 12; i++) {
3652                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3653                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3654                 }
3655         }
3656
3657         addr_high = (tp->dev->dev_addr[0] +
3658                      tp->dev->dev_addr[1] +
3659                      tp->dev->dev_addr[2] +
3660                      tp->dev->dev_addr[3] +
3661                      tp->dev->dev_addr[4] +
3662                      tp->dev->dev_addr[5]) &
3663                 TX_BACKOFF_SEED_MASK;
3664         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3665 }
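/* Example of the backoff seed math above (illustrative MAC address
 * only): for 00:10:18:aa:bb:cc the byte sum is
 * 0x00 + 0x10 + 0x18 + 0xaa + 0xbb + 0xcc = 0x259, which is then
 * masked with TX_BACKOFF_SEED_MASK before being written to the MAC.
 */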
3666
3667 static void tg3_enable_register_access(struct tg3 *tp)
3668 {
3669         /*
3670          * Make sure register accesses (indirect or otherwise) will function
3671          * correctly.
3672          */
3673         pci_write_config_dword(tp->pdev,
3674                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3675 }
3676
3677 static int tg3_power_up(struct tg3 *tp)
3678 {
3679         int err;
3680
3681         tg3_enable_register_access(tp);
3682
3683         err = pci_set_power_state(tp->pdev, PCI_D0);
3684         if (!err) {
3685                 /* Switch out of Vaux if it is a NIC */
3686                 tg3_pwrsrc_switch_to_vmain(tp);
3687         } else {
3688                 netdev_err(tp->dev, "Transition to D0 failed\n");
3689         }
3690
3691         return err;
3692 }
3693
3694 static int tg3_setup_phy(struct tg3 *, int);
3695
3696 static int tg3_power_down_prepare(struct tg3 *tp)
3697 {
3698         u32 misc_host_ctrl;
3699         bool device_should_wake, do_low_power;
3700
3701         tg3_enable_register_access(tp);
3702
3703         /* Restore the CLKREQ setting. */
3704         if (tg3_flag(tp, CLKREQ_BUG))
3705                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3706                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3707
3708         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3709         tw32(TG3PCI_MISC_HOST_CTRL,
3710              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3711
3712         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3713                              tg3_flag(tp, WOL_ENABLE);
3714
3715         if (tg3_flag(tp, USE_PHYLIB)) {
3716                 do_low_power = false;
3717                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3718                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3719                         struct phy_device *phydev;
3720                         u32 phyid, advertising;
3721
3722                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3723
3724                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3725
3726                         tp->link_config.speed = phydev->speed;
3727                         tp->link_config.duplex = phydev->duplex;
3728                         tp->link_config.autoneg = phydev->autoneg;
3729                         tp->link_config.advertising = phydev->advertising;
3730
3731                         advertising = ADVERTISED_TP |
3732                                       ADVERTISED_Pause |
3733                                       ADVERTISED_Autoneg |
3734                                       ADVERTISED_10baseT_Half;
3735
3736                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3737                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3738                                         advertising |=
3739                                                 ADVERTISED_100baseT_Half |
3740                                                 ADVERTISED_100baseT_Full |
3741                                                 ADVERTISED_10baseT_Full;
3742                                 else
3743                                         advertising |= ADVERTISED_10baseT_Full;
3744                         }
3745
3746                         phydev->advertising = advertising;
3747
3748                         phy_start_aneg(phydev);
3749
3750                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3751                         if (phyid != PHY_ID_BCMAC131) {
3752                                 phyid &= PHY_BCM_OUI_MASK;
3753                                 if (phyid == PHY_BCM_OUI_1 ||
3754                                     phyid == PHY_BCM_OUI_2 ||
3755                                     phyid == PHY_BCM_OUI_3)
3756                                         do_low_power = true;
3757                         }
3758                 }
3759         } else {
3760                 do_low_power = true;
3761
3762                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3763                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3764
3765                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3766                         tg3_setup_phy(tp, 0);
3767         }
3768
3769         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3770                 u32 val;
3771
3772                 val = tr32(GRC_VCPU_EXT_CTRL);
3773                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3774         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3775                 int i;
3776                 u32 val;
3777
3778                 for (i = 0; i < 200; i++) {
3779                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3780                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3781                                 break;
3782                         msleep(1);
3783                 }
3784         }
3785         if (tg3_flag(tp, WOL_CAP))
3786                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3787                                                      WOL_DRV_STATE_SHUTDOWN |
3788                                                      WOL_DRV_WOL |
3789                                                      WOL_SET_MAGIC_PKT);
3790
3791         if (device_should_wake) {
3792                 u32 mac_mode;
3793
3794                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3795                         if (do_low_power &&
3796                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3797                                 tg3_phy_auxctl_write(tp,
3798                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3799                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3800                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3801                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3802                                 udelay(40);
3803                         }
3804
3805                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3806                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3807                         else
3808                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3809
3810                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3811                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3812                             ASIC_REV_5700) {
3813                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3814                                              SPEED_100 : SPEED_10;
3815                                 if (tg3_5700_link_polarity(tp, speed))
3816                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3817                                 else
3818                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3819                         }
3820                 } else {
3821                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3822                 }
3823
3824                 if (!tg3_flag(tp, 5750_PLUS))
3825                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3826
3827                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3828                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3829                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3830                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3831
3832                 if (tg3_flag(tp, ENABLE_APE))
3833                         mac_mode |= MAC_MODE_APE_TX_EN |
3834                                     MAC_MODE_APE_RX_EN |
3835                                     MAC_MODE_TDE_ENABLE;
3836
3837                 tw32_f(MAC_MODE, mac_mode);
3838                 udelay(100);
3839
3840                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3841                 udelay(10);
3842         }
3843
3844         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3845             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3846              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3847                 u32 base_val;
3848
3849                 base_val = tp->pci_clock_ctrl;
3850                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3851                              CLOCK_CTRL_TXCLK_DISABLE);
3852
3853                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3854                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3855         } else if (tg3_flag(tp, 5780_CLASS) ||
3856                    tg3_flag(tp, CPMU_PRESENT) ||
3857                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3858                 /* do nothing */
3859         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3860                 u32 newbits1, newbits2;
3861
3862                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3863                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3864                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3865                                     CLOCK_CTRL_TXCLK_DISABLE |
3866                                     CLOCK_CTRL_ALTCLK);
3867                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3868                 } else if (tg3_flag(tp, 5705_PLUS)) {
3869                         newbits1 = CLOCK_CTRL_625_CORE;
3870                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3871                 } else {
3872                         newbits1 = CLOCK_CTRL_ALTCLK;
3873                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3874                 }
3875
3876                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3877                             40);
3878
3879                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3880                             40);
3881
3882                 if (!tg3_flag(tp, 5705_PLUS)) {
3883                         u32 newbits3;
3884
3885                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3886                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3887                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3888                                             CLOCK_CTRL_TXCLK_DISABLE |
3889                                             CLOCK_CTRL_44MHZ_CORE);
3890                         } else {
3891                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3892                         }
3893
3894                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3895                                     tp->pci_clock_ctrl | newbits3, 40);
3896                 }
3897         }
3898
3899         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3900                 tg3_power_down_phy(tp, do_low_power);
3901
3902         tg3_frob_aux_power(tp, true);
3903
3904         /* Workaround for unstable PLL clock */
3905         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3906             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3907                 u32 val = tr32(0x7d00);
3908
3909                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3910                 tw32(0x7d00, val);
3911                 if (!tg3_flag(tp, ENABLE_ASF)) {
3912                         int err;
3913
3914                         err = tg3_nvram_lock(tp);
3915                         tg3_halt_cpu(tp, RX_CPU_BASE);
3916                         if (!err)
3917                                 tg3_nvram_unlock(tp);
3918                 }
3919         }
3920
3921         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3922
3923         return 0;
3924 }
3925
3926 static void tg3_power_down(struct tg3 *tp)
3927 {
3928         tg3_power_down_prepare(tp);
3929
3930         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3931         pci_set_power_state(tp->pdev, PCI_D3hot);
3932 }
3933
3934 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3935 {
3936         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3937         case MII_TG3_AUX_STAT_10HALF:
3938                 *speed = SPEED_10;
3939                 *duplex = DUPLEX_HALF;
3940                 break;
3941
3942         case MII_TG3_AUX_STAT_10FULL:
3943                 *speed = SPEED_10;
3944                 *duplex = DUPLEX_FULL;
3945                 break;
3946
3947         case MII_TG3_AUX_STAT_100HALF:
3948                 *speed = SPEED_100;
3949                 *duplex = DUPLEX_HALF;
3950                 break;
3951
3952         case MII_TG3_AUX_STAT_100FULL:
3953                 *speed = SPEED_100;
3954                 *duplex = DUPLEX_FULL;
3955                 break;
3956
3957         case MII_TG3_AUX_STAT_1000HALF:
3958                 *speed = SPEED_1000;
3959                 *duplex = DUPLEX_HALF;
3960                 break;
3961
3962         case MII_TG3_AUX_STAT_1000FULL:
3963                 *speed = SPEED_1000;
3964                 *duplex = DUPLEX_FULL;
3965                 break;
3966
3967         default:
3968                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3969                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3970                                  SPEED_10;
3971                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3972                                   DUPLEX_HALF;
3973                         break;
3974                 }
3975                 *speed = SPEED_UNKNOWN;
3976                 *duplex = DUPLEX_UNKNOWN;
3977                 break;
3978         }
3979 }
3980
3981 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3982 {
3983         int err = 0;
3984         u32 val, new_adv;
3985
3986         new_adv = ADVERTISE_CSMA;
3987         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3988         new_adv |= mii_advertise_flowctrl(flowctrl);
3989
3990         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3991         if (err)
3992                 goto done;
3993
3994         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3995                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3996
3997                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3998                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3999                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4000
4001                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4002                 if (err)
4003                         goto done;
4004         }
4005
4006         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4007                 goto done;
4008
4009         tw32(TG3_CPMU_EEE_MODE,
4010              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4011
4012         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
4013         if (!err) {
4014                 u32 err2;
4015
4016                 val = 0;
4017                 /* Advertise 100BASE-TX EEE ability */
4018                 if (advertise & ADVERTISED_100baseT_Full)
4019                         val |= MDIO_AN_EEE_ADV_100TX;
4020                 /* Advertise 1000BASE-T EEE ability */
4021                 if (advertise & ADVERTISED_1000baseT_Full)
4022                         val |= MDIO_AN_EEE_ADV_1000T;
4023                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4024                 if (err)
4025                         val = 0;
4026
4027                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
4028                 case ASIC_REV_5717:
4029                 case ASIC_REV_57765:
4030                 case ASIC_REV_57766:
4031                 case ASIC_REV_5719:
4032                         /* If we advertised any EEE modes above... */
4033                         if (val)
4034                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4035                                       MII_TG3_DSP_TAP26_RMRXSTO |
4036                                       MII_TG3_DSP_TAP26_OPCSINPT;
4037                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4038                         /* Fall through */
4039                 case ASIC_REV_5720:
4040                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4041                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4042                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4043                 }
4044
4045                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
4046                 if (!err)
4047                         err = err2;
4048         }
4049
4050 done:
4051         return err;
4052 }
4053
4054 static void tg3_phy_copper_begin(struct tg3 *tp)
4055 {
4056         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4057             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4058                 u32 adv, fc;
4059
4060                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4061                         adv = ADVERTISED_10baseT_Half |
4062                               ADVERTISED_10baseT_Full;
4063                         if (tg3_flag(tp, WOL_SPEED_100MB))
4064                                 adv |= ADVERTISED_100baseT_Half |
4065                                        ADVERTISED_100baseT_Full;
4066
4067                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4068                 } else {
4069                         adv = tp->link_config.advertising;
4070                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4071                                 adv &= ~(ADVERTISED_1000baseT_Half |
4072                                          ADVERTISED_1000baseT_Full);
4073
4074                         fc = tp->link_config.flowctrl;
4075                 }
4076
4077                 tg3_phy_autoneg_cfg(tp, adv, fc);
4078
4079                 tg3_writephy(tp, MII_BMCR,
4080                              BMCR_ANENABLE | BMCR_ANRESTART);
4081         } else {
4082                 int i;
4083                 u32 bmcr, orig_bmcr;
4084
4085                 tp->link_config.active_speed = tp->link_config.speed;
4086                 tp->link_config.active_duplex = tp->link_config.duplex;
4087
4088                 bmcr = 0;
4089                 switch (tp->link_config.speed) {
4090                 default:
4091                 case SPEED_10:
4092                         break;
4093
4094                 case SPEED_100:
4095                         bmcr |= BMCR_SPEED100;
4096                         break;
4097
4098                 case SPEED_1000:
4099                         bmcr |= BMCR_SPEED1000;
4100                         break;
4101                 }
4102
4103                 if (tp->link_config.duplex == DUPLEX_FULL)
4104                         bmcr |= BMCR_FULLDPLX;
4105
4106                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4107                     (bmcr != orig_bmcr)) {
4108                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4109                         for (i = 0; i < 1500; i++) {
4110                                 u32 tmp;
4111
4112                                 udelay(10);
4113                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4114                                     tg3_readphy(tp, MII_BMSR, &tmp))
4115                                         continue;
4116                                 if (!(tmp & BMSR_LSTATUS)) {
4117                                         udelay(40);
4118                                         break;
4119                                 }
4120                         }
4121                         tg3_writephy(tp, MII_BMCR, bmcr);
4122                         udelay(40);
4123                 }
4124         }
4125 }
4126
4127 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4128 {
4129         int err;
4130
4131         /* Turn off tap power management and
4132          * set the extended packet length bit. */
4133         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4134
4135         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4136         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4137         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4138         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4139         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4140
4141         udelay(40);
4142
4143         return err;
4144 }
4145
4146 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4147 {
4148         u32 advmsk, tgtadv, advertising;
4149
4150         advertising = tp->link_config.advertising;
4151         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4152
4153         advmsk = ADVERTISE_ALL;
4154         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4155                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4156                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4157         }
4158
4159         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4160                 return false;
4161
4162         if ((*lcladv & advmsk) != tgtadv)
4163                 return false;
4164
4165         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4166                 u32 tg3_ctrl;
4167
4168                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4169
4170                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4171                         return false;
4172
4173                 if (tgtadv &&
4174                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4175                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4176                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4177                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4178                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4179                 } else {
4180                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4181                 }
4182
4183                 if (tg3_ctrl != tgtadv)
4184                         return false;
4185         }
4186
4187         return true;
4188 }
4189
4190 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4191 {
4192         u32 lpeth = 0;
4193
4194         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4195                 u32 val;
4196
4197                 if (tg3_readphy(tp, MII_STAT1000, &val))
4198                         return false;
4199
4200                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4201         }
4202
4203         if (tg3_readphy(tp, MII_LPA, rmtadv))
4204                 return false;
4205
4206         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4207         tp->link_config.rmt_adv = lpeth;
4208
4209         return true;
4210 }
4211
4212 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4213 {
4214         if (curr_link_up != tp->link_up) {
4215                 if (curr_link_up) {
4216                         tg3_carrier_on(tp);
4217                 } else {
4218                         tg3_carrier_off(tp);
4219                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4220                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4221                 }
4222
4223                 tg3_link_report(tp);
4224                 return true;
4225         }
4226
4227         return false;
4228 }
4229
4230 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4231 {
4232         int current_link_up;
4233         u32 bmsr, val;
4234         u32 lcl_adv, rmt_adv;
4235         u16 current_speed;
4236         u8 current_duplex;
4237         int i, err;
4238
4239         tw32(MAC_EVENT, 0);
4240
4241         tw32_f(MAC_STATUS,
4242              (MAC_STATUS_SYNC_CHANGED |
4243               MAC_STATUS_CFG_CHANGED |
4244               MAC_STATUS_MI_COMPLETION |
4245               MAC_STATUS_LNKSTATE_CHANGED));
4246         udelay(40);
4247
4248         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4249                 tw32_f(MAC_MI_MODE,
4250                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4251                 udelay(80);
4252         }
4253
4254         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4255
4256         /* Some third-party PHYs need to be reset on link going
4257          * down.
4258          */
4259         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4260              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4261              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4262             tp->link_up) {
4263                 tg3_readphy(tp, MII_BMSR, &bmsr);
4264                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4265                     !(bmsr & BMSR_LSTATUS))
4266                         force_reset = 1;
4267         }
4268         if (force_reset)
4269                 tg3_phy_reset(tp);
4270
4271         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4272                 tg3_readphy(tp, MII_BMSR, &bmsr);
4273                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4274                     !tg3_flag(tp, INIT_COMPLETE))
4275                         bmsr = 0;
4276
4277                 if (!(bmsr & BMSR_LSTATUS)) {
4278                         err = tg3_init_5401phy_dsp(tp);
4279                         if (err)
4280                                 return err;
4281
4282                         tg3_readphy(tp, MII_BMSR, &bmsr);
4283                         for (i = 0; i < 1000; i++) {
4284                                 udelay(10);
4285                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4286                                     (bmsr & BMSR_LSTATUS)) {
4287                                         udelay(40);
4288                                         break;
4289                                 }
4290                         }
4291
4292                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4293                             TG3_PHY_REV_BCM5401_B0 &&
4294                             !(bmsr & BMSR_LSTATUS) &&
4295                             tp->link_config.active_speed == SPEED_1000) {
4296                                 err = tg3_phy_reset(tp);
4297                                 if (!err)
4298                                         err = tg3_init_5401phy_dsp(tp);
4299                                 if (err)
4300                                         return err;
4301                         }
4302                 }
4303         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4304                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4305                 /* 5701 {A0,B0} CRC bug workaround */
4306                 tg3_writephy(tp, 0x15, 0x0a75);
4307                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4308                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4309                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4310         }
4311
4312         /* Clear pending interrupts... */
4313         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4314         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4315
4316         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4317                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4318         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4319                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4320
4321         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4322             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4323                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4324                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4325                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4326                 else
4327                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4328         }
4329
4330         current_link_up = 0;
4331         current_speed = SPEED_UNKNOWN;
4332         current_duplex = DUPLEX_UNKNOWN;
4333         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4334         tp->link_config.rmt_adv = 0;
4335
4336         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4337                 err = tg3_phy_auxctl_read(tp,
4338                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4339                                           &val);
4340                 if (!err && !(val & (1 << 10))) {
4341                         tg3_phy_auxctl_write(tp,
4342                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4343                                              val | (1 << 10));
4344                         goto relink;
4345                 }
4346         }
4347
4348         bmsr = 0;
4349         for (i = 0; i < 100; i++) {
4350                 tg3_readphy(tp, MII_BMSR, &bmsr);
4351                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4352                     (bmsr & BMSR_LSTATUS))
4353                         break;
4354                 udelay(40);
4355         }
4356
4357         if (bmsr & BMSR_LSTATUS) {
4358                 u32 aux_stat, bmcr;
4359
4360                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4361                 for (i = 0; i < 2000; i++) {
4362                         udelay(10);
4363                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4364                             aux_stat)
4365                                 break;
4366                 }
4367
4368                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4369                                              &current_speed,
4370                                              &current_duplex);
4371
4372                 bmcr = 0;
4373                 for (i = 0; i < 200; i++) {
4374                         tg3_readphy(tp, MII_BMCR, &bmcr);
4375                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4376                                 continue;
4377                         if (bmcr && bmcr != 0x7fff)
4378                                 break;
4379                         udelay(10);
4380                 }
4381
4382                 lcl_adv = 0;
4383                 rmt_adv = 0;
4384
4385                 tp->link_config.active_speed = current_speed;
4386                 tp->link_config.active_duplex = current_duplex;
4387
4388                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4389                         if ((bmcr & BMCR_ANENABLE) &&
4390                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4391                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4392                                 current_link_up = 1;
4393                 } else {
4394                         if (!(bmcr & BMCR_ANENABLE) &&
4395                             tp->link_config.speed == current_speed &&
4396                             tp->link_config.duplex == current_duplex &&
4397                             tp->link_config.flowctrl ==
4398                             tp->link_config.active_flowctrl) {
4399                                 current_link_up = 1;
4400                         }
4401                 }
4402
4403                 if (current_link_up == 1 &&
4404                     tp->link_config.active_duplex == DUPLEX_FULL) {
4405                         u32 reg, bit;
4406
4407                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4408                                 reg = MII_TG3_FET_GEN_STAT;
4409                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4410                         } else {
4411                                 reg = MII_TG3_EXT_STAT;
4412                                 bit = MII_TG3_EXT_STAT_MDIX;
4413                         }
4414
4415                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4416                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4417
4418                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4419                 }
4420         }
4421
4422 relink:
4423         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4424                 tg3_phy_copper_begin(tp);
4425
4426                 tg3_readphy(tp, MII_BMSR, &bmsr);
4427                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4428                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4429                         current_link_up = 1;
4430         }
4431
4432         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4433         if (current_link_up == 1) {
4434                 if (tp->link_config.active_speed == SPEED_100 ||
4435                     tp->link_config.active_speed == SPEED_10)
4436                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4437                 else
4438                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4439         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4440                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4441         else
4442                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4443
4444         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4445         if (tp->link_config.active_duplex == DUPLEX_HALF)
4446                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4447
4448         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4449                 if (current_link_up == 1 &&
4450                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4451                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4452                 else
4453                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4454         }
4455
4456         /* ??? Without this setting Netgear GA302T PHY does not
4457          * ??? send/receive packets...
4458          */
4459         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4460             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4461                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4462                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4463                 udelay(80);
4464         }
4465
4466         tw32_f(MAC_MODE, tp->mac_mode);
4467         udelay(40);
4468
4469         tg3_phy_eee_adjust(tp, current_link_up);
4470
4471         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4472                 /* Polled via timer. */
4473                 tw32_f(MAC_EVENT, 0);
4474         } else {
4475                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4476         }
4477         udelay(40);
4478
4479         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4480             current_link_up == 1 &&
4481             tp->link_config.active_speed == SPEED_1000 &&
4482             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4483                 udelay(120);
4484                 tw32_f(MAC_STATUS,
4485                      (MAC_STATUS_SYNC_CHANGED |
4486                       MAC_STATUS_CFG_CHANGED));
4487                 udelay(40);
4488                 tg3_write_mem(tp,
4489                               NIC_SRAM_FIRMWARE_MBOX,
4490                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4491         }
4492
4493         /* Prevent send BD corruption. */
4494         if (tg3_flag(tp, CLKREQ_BUG)) {
4495                 if (tp->link_config.active_speed == SPEED_100 ||
4496                     tp->link_config.active_speed == SPEED_10)
4497                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4498                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
4499                 else
4500                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4501                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
4502         }
4503
4504         tg3_test_and_report_link_chg(tp, current_link_up);
4505
4506         return 0;
4507 }
4508
4509 struct tg3_fiber_aneginfo {
4510         int state;
4511 #define ANEG_STATE_UNKNOWN              0
4512 #define ANEG_STATE_AN_ENABLE            1
4513 #define ANEG_STATE_RESTART_INIT         2
4514 #define ANEG_STATE_RESTART              3
4515 #define ANEG_STATE_DISABLE_LINK_OK      4
4516 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4517 #define ANEG_STATE_ABILITY_DETECT       6
4518 #define ANEG_STATE_ACK_DETECT_INIT      7
4519 #define ANEG_STATE_ACK_DETECT           8
4520 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4521 #define ANEG_STATE_COMPLETE_ACK         10
4522 #define ANEG_STATE_IDLE_DETECT_INIT     11
4523 #define ANEG_STATE_IDLE_DETECT          12
4524 #define ANEG_STATE_LINK_OK              13
4525 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4526 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4527
4528         u32 flags;
4529 #define MR_AN_ENABLE            0x00000001
4530 #define MR_RESTART_AN           0x00000002
4531 #define MR_AN_COMPLETE          0x00000004
4532 #define MR_PAGE_RX              0x00000008
4533 #define MR_NP_LOADED            0x00000010
4534 #define MR_TOGGLE_TX            0x00000020
4535 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4536 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4537 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4538 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4539 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4540 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4541 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4542 #define MR_TOGGLE_RX            0x00002000
4543 #define MR_NP_RX                0x00004000
4544
4545 #define MR_LINK_OK              0x80000000
4546
4547         unsigned long link_time, cur_time;
4548
4549         u32 ability_match_cfg;
4550         int ability_match_count;
4551
4552         char ability_match, idle_match, ack_match;
4553
4554         u32 txconfig, rxconfig;
4555 #define ANEG_CFG_NP             0x00000080
4556 #define ANEG_CFG_ACK            0x00000040
4557 #define ANEG_CFG_RF2            0x00000020
4558 #define ANEG_CFG_RF1            0x00000010
4559 #define ANEG_CFG_PS2            0x00000001
4560 #define ANEG_CFG_PS1            0x00008000
4561 #define ANEG_CFG_HD             0x00004000
4562 #define ANEG_CFG_FD             0x00002000
4563 #define ANEG_CFG_INVAL          0x00001f06
4564
4565 };
4566 #define ANEG_OK         0
4567 #define ANEG_DONE       1
4568 #define ANEG_TIMER_ENAB 2
4569 #define ANEG_FAILED     -1
4570
4571 #define ANEG_STATE_SETTLE_TIME  10000
4572
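/* Nominal forward progression of the state machine below:
 *
 *   AN_ENABLE -> RESTART_INIT -> RESTART -> ABILITY_DETECT_INIT ->
 *   ABILITY_DETECT -> ACK_DETECT_INIT -> ACK_DETECT ->
 *   COMPLETE_ACK_INIT -> COMPLETE_ACK -> IDLE_DETECT_INIT ->
 *   IDLE_DETECT -> LINK_OK
 *
 * Several states fall back to AN_ENABLE when the partner's config
 * words go away; the NEXT_PAGE states are unimplemented.
 */
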
4573 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4574                                    struct tg3_fiber_aneginfo *ap)
4575 {
4576         u16 flowctrl;
4577         unsigned long delta;
4578         u32 rx_cfg_reg;
4579         int ret;
4580
4581         if (ap->state == ANEG_STATE_UNKNOWN) {
4582                 ap->rxconfig = 0;
4583                 ap->link_time = 0;
4584                 ap->cur_time = 0;
4585                 ap->ability_match_cfg = 0;
4586                 ap->ability_match_count = 0;
4587                 ap->ability_match = 0;
4588                 ap->idle_match = 0;
4589                 ap->ack_match = 0;
4590         }
4591         ap->cur_time++;
4592
4593         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4594                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4595
4596                 if (rx_cfg_reg != ap->ability_match_cfg) {
4597                         ap->ability_match_cfg = rx_cfg_reg;
4598                         ap->ability_match = 0;
4599                         ap->ability_match_count = 0;
4600                 } else {
4601                         if (++ap->ability_match_count > 1) {
4602                                 ap->ability_match = 1;
4603                                 ap->ability_match_cfg = rx_cfg_reg;
4604                         }
4605                 }
4606                 if (rx_cfg_reg & ANEG_CFG_ACK)
4607                         ap->ack_match = 1;
4608                 else
4609                         ap->ack_match = 0;
4610
4611                 ap->idle_match = 0;
4612         } else {
4613                 ap->idle_match = 1;
4614                 ap->ability_match_cfg = 0;
4615                 ap->ability_match_count = 0;
4616                 ap->ability_match = 0;
4617                 ap->ack_match = 0;
4618
4619                 rx_cfg_reg = 0;
4620         }
4621
4622         ap->rxconfig = rx_cfg_reg;
4623         ret = ANEG_OK;
4624
4625         switch (ap->state) {
4626         case ANEG_STATE_UNKNOWN:
4627                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4628                         ap->state = ANEG_STATE_AN_ENABLE;
4629
4630                 /* fallthru */
4631         case ANEG_STATE_AN_ENABLE:
4632                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4633                 if (ap->flags & MR_AN_ENABLE) {
4634                         ap->link_time = 0;
4635                         ap->cur_time = 0;
4636                         ap->ability_match_cfg = 0;
4637                         ap->ability_match_count = 0;
4638                         ap->ability_match = 0;
4639                         ap->idle_match = 0;
4640                         ap->ack_match = 0;
4641
4642                         ap->state = ANEG_STATE_RESTART_INIT;
4643                 } else {
4644                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4645                 }
4646                 break;
4647
4648         case ANEG_STATE_RESTART_INIT:
4649                 ap->link_time = ap->cur_time;
4650                 ap->flags &= ~(MR_NP_LOADED);
4651                 ap->txconfig = 0;
4652                 tw32(MAC_TX_AUTO_NEG, 0);
4653                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4654                 tw32_f(MAC_MODE, tp->mac_mode);
4655                 udelay(40);
4656
4657                 ret = ANEG_TIMER_ENAB;
4658                 ap->state = ANEG_STATE_RESTART;
4659
4660                 /* fallthru */
4661         case ANEG_STATE_RESTART:
4662                 delta = ap->cur_time - ap->link_time;
4663                 if (delta > ANEG_STATE_SETTLE_TIME)
4664                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4665                 else
4666                         ret = ANEG_TIMER_ENAB;
4667                 break;
4668
4669         case ANEG_STATE_DISABLE_LINK_OK:
4670                 ret = ANEG_DONE;
4671                 break;
4672
4673         case ANEG_STATE_ABILITY_DETECT_INIT:
4674                 ap->flags &= ~(MR_TOGGLE_TX);
4675                 ap->txconfig = ANEG_CFG_FD;
4676                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4677                 if (flowctrl & ADVERTISE_1000XPAUSE)
4678                         ap->txconfig |= ANEG_CFG_PS1;
4679                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4680                         ap->txconfig |= ANEG_CFG_PS2;
4681                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4682                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4683                 tw32_f(MAC_MODE, tp->mac_mode);
4684                 udelay(40);
4685
4686                 ap->state = ANEG_STATE_ABILITY_DETECT;
4687                 break;
4688
4689         case ANEG_STATE_ABILITY_DETECT:
4690                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4691                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4692                 break;
4693
4694         case ANEG_STATE_ACK_DETECT_INIT:
4695                 ap->txconfig |= ANEG_CFG_ACK;
4696                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4697                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4698                 tw32_f(MAC_MODE, tp->mac_mode);
4699                 udelay(40);
4700
4701                 ap->state = ANEG_STATE_ACK_DETECT;
4702
4703                 /* fallthru */
4704         case ANEG_STATE_ACK_DETECT:
4705                 if (ap->ack_match != 0) {
4706                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4707                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4708                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4709                         } else {
4710                                 ap->state = ANEG_STATE_AN_ENABLE;
4711                         }
4712                 } else if (ap->ability_match != 0 &&
4713                            ap->rxconfig == 0) {
4714                         ap->state = ANEG_STATE_AN_ENABLE;
4715                 }
4716                 break;
4717
4718         case ANEG_STATE_COMPLETE_ACK_INIT:
4719                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4720                         ret = ANEG_FAILED;
4721                         break;
4722                 }
4723                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4724                                MR_LP_ADV_HALF_DUPLEX |
4725                                MR_LP_ADV_SYM_PAUSE |
4726                                MR_LP_ADV_ASYM_PAUSE |
4727                                MR_LP_ADV_REMOTE_FAULT1 |
4728                                MR_LP_ADV_REMOTE_FAULT2 |
4729                                MR_LP_ADV_NEXT_PAGE |
4730                                MR_TOGGLE_RX |
4731                                MR_NP_RX);
4732                 if (ap->rxconfig & ANEG_CFG_FD)
4733                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4734                 if (ap->rxconfig & ANEG_CFG_HD)
4735                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4736                 if (ap->rxconfig & ANEG_CFG_PS1)
4737                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4738                 if (ap->rxconfig & ANEG_CFG_PS2)
4739                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4740                 if (ap->rxconfig & ANEG_CFG_RF1)
4741                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4742                 if (ap->rxconfig & ANEG_CFG_RF2)
4743                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4744                 if (ap->rxconfig & ANEG_CFG_NP)
4745                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4746
4747                 ap->link_time = ap->cur_time;
4748
4749                 ap->flags ^= (MR_TOGGLE_TX);
4750                 if (ap->rxconfig & 0x0008)
4751                         ap->flags |= MR_TOGGLE_RX;
4752                 if (ap->rxconfig & ANEG_CFG_NP)
4753                         ap->flags |= MR_NP_RX;
4754                 ap->flags |= MR_PAGE_RX;
4755
4756                 ap->state = ANEG_STATE_COMPLETE_ACK;
4757                 ret = ANEG_TIMER_ENAB;
4758                 break;
4759
4760         case ANEG_STATE_COMPLETE_ACK:
4761                 if (ap->ability_match != 0 &&
4762                     ap->rxconfig == 0) {
4763                         ap->state = ANEG_STATE_AN_ENABLE;
4764                         break;
4765                 }
4766                 delta = ap->cur_time - ap->link_time;
4767                 if (delta > ANEG_STATE_SETTLE_TIME) {
4768                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4769                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4770                         } else {
4771                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4772                                     !(ap->flags & MR_NP_RX)) {
4773                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4774                                 } else {
4775                                         ret = ANEG_FAILED;
4776                                 }
4777                         }
4778                 }
4779                 break;
4780
4781         case ANEG_STATE_IDLE_DETECT_INIT:
4782                 ap->link_time = ap->cur_time;
4783                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4784                 tw32_f(MAC_MODE, tp->mac_mode);
4785                 udelay(40);
4786
4787                 ap->state = ANEG_STATE_IDLE_DETECT;
4788                 ret = ANEG_TIMER_ENAB;
4789                 break;
4790
4791         case ANEG_STATE_IDLE_DETECT:
4792                 if (ap->ability_match != 0 &&
4793                     ap->rxconfig == 0) {
4794                         ap->state = ANEG_STATE_AN_ENABLE;
4795                         break;
4796                 }
4797                 delta = ap->cur_time - ap->link_time;
4798                 if (delta > ANEG_STATE_SETTLE_TIME) {
4799                         /* XXX another gem from the Broadcom driver :( */
4800                         ap->state = ANEG_STATE_LINK_OK;
4801                 }
4802                 break;
4803
4804         case ANEG_STATE_LINK_OK:
4805                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4806                 ret = ANEG_DONE;
4807                 break;
4808
4809         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4810                 /* ??? unimplemented */
4811                 break;
4812
4813         case ANEG_STATE_NEXT_PAGE_WAIT:
4814                 /* ??? unimplemented */
4815                 break;
4816
4817         default:
4818                 ret = ANEG_FAILED;
4819                 break;
4820         }
4821
4822         return ret;
4823 }
4824
4825 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4826 {
4827         int res = 0;
4828         struct tg3_fiber_aneginfo aninfo;
4829         int status = ANEG_FAILED;
4830         unsigned int tick;
4831         u32 tmp;
4832
4833         tw32_f(MAC_TX_AUTO_NEG, 0);
4834
4835         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4836         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4837         udelay(40);
4838
4839         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4840         udelay(40);
4841
4842         memset(&aninfo, 0, sizeof(aninfo));
4843         aninfo.flags |= MR_AN_ENABLE;
4844         aninfo.state = ANEG_STATE_UNKNOWN;
4845         aninfo.cur_time = 0;
4846         tick = 0;
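        /* Step the state machine by hand, one microsecond tick at a
         * time, for at most ~195 ms or until it completes or fails.
         */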
4847         while (++tick < 195000) {
4848                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4849                 if (status == ANEG_DONE || status == ANEG_FAILED)
4850                         break;
4851
4852                 udelay(1);
4853         }
4854
4855         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4856         tw32_f(MAC_MODE, tp->mac_mode);
4857         udelay(40);
4858
4859         *txflags = aninfo.txconfig;
4860         *rxflags = aninfo.flags;
4861
4862         if (status == ANEG_DONE &&
4863             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4864                              MR_LP_ADV_FULL_DUPLEX)))
4865                 res = 1;
4866
4867         return res;
4868 }
4869
4870 static void tg3_init_bcm8002(struct tg3 *tp)
4871 {
4872         u32 mac_status = tr32(MAC_STATUS);
4873         int i;
4874
4875         /* Reset when initializing for the first time, or when we have a link. */
4876         if (tg3_flag(tp, INIT_COMPLETE) &&
4877             !(mac_status & MAC_STATUS_PCS_SYNCED))
4878                 return;
4879
4880         /* Set PLL lock range. */
4881         tg3_writephy(tp, 0x16, 0x8007);
4882
4883         /* SW reset */
4884         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4885
4886         /* Wait for reset to complete. */
4887         /* XXX schedule_timeout() ... */
4888         for (i = 0; i < 500; i++)
4889                 udelay(10);
4890
4891         /* Config mode; select PMA/Ch 1 regs. */
4892         tg3_writephy(tp, 0x10, 0x8411);
4893
4894         /* Enable auto-lock and comdet, select txclk for tx. */
4895         tg3_writephy(tp, 0x11, 0x0a10);
4896
4897         tg3_writephy(tp, 0x18, 0x00a0);
4898         tg3_writephy(tp, 0x16, 0x41ff);
4899
4900         /* Assert and deassert POR. */
4901         tg3_writephy(tp, 0x13, 0x0400);
4902         udelay(40);
4903         tg3_writephy(tp, 0x13, 0x0000);
4904
4905         tg3_writephy(tp, 0x11, 0x0a50);
4906         udelay(40);
4907         tg3_writephy(tp, 0x11, 0x0a10);
4908
4909         /* Wait for signal to stabilize */
4910         /* XXX schedule_timeout() ... */
4911         for (i = 0; i < 15000; i++)
4912                 udelay(10);
4913
4914         /* Deselect the channel register so we can read the PHYID
4915          * later.
4916          */
4917         tg3_writephy(tp, 0x10, 0x8011);
4918 }
4919
4920 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4921 {
4922         u16 flowctrl;
4923         u32 sg_dig_ctrl, sg_dig_status;
4924         u32 serdes_cfg, expected_sg_dig_ctrl;
4925         int workaround, port_a;
4926         int current_link_up;
4927
4928         serdes_cfg = 0;
4929         expected_sg_dig_ctrl = 0;
4930         workaround = 0;
4931         port_a = 1;
4932         current_link_up = 0;
4933
4934         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4935             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4936                 workaround = 1;
4937                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4938                         port_a = 0;
4939
4940                 /* Preserve bits 0-11, 13 and 14 for signal pre-emphasis,
4941                  * and bits 20-23 for the voltage regulator. */
4942                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4943         }
4944
4945         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4946
4947         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4948                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4949                         if (workaround) {
4950                                 u32 val = serdes_cfg;
4951
4952                                 if (port_a)
4953                                         val |= 0xc010000;
4954                                 else
4955                                         val |= 0x4010000;
4956                                 tw32_f(MAC_SERDES_CFG, val);
4957                         }
4958
4959                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4960                 }
4961                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4962                         tg3_setup_flow_control(tp, 0, 0);
4963                         current_link_up = 1;
4964                 }
4965                 goto out;
4966         }
4967
4968         /* Want auto-negotiation.  */
4969         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4970
4971         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4972         if (flowctrl & ADVERTISE_1000XPAUSE)
4973                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4974         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4975                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4976
4977         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4978                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4979                     tp->serdes_counter &&
4980                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4981                                     MAC_STATUS_RCVD_CFG)) ==
4982                      MAC_STATUS_PCS_SYNCED)) {
4983                         tp->serdes_counter--;
4984                         current_link_up = 1;
4985                         goto out;
4986                 }
4987 restart_autoneg:
4988                 if (workaround)
4989                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4990                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4991                 udelay(5);
4992                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4993
4994                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4995                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4996         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4997                                  MAC_STATUS_SIGNAL_DET)) {
4998                 sg_dig_status = tr32(SG_DIG_STATUS);
4999                 mac_status = tr32(MAC_STATUS);
5000
5001                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5002                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5003                         u32 local_adv = 0, remote_adv = 0;
5004
5005                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5006                                 local_adv |= ADVERTISE_1000XPAUSE;
5007                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5008                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5009
5010                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5011                                 remote_adv |= LPA_1000XPAUSE;
5012                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5013                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5014
5015                         tp->link_config.rmt_adv =
5016                                            mii_adv_to_ethtool_adv_x(remote_adv);
5017
5018                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5019                         current_link_up = 1;
5020                         tp->serdes_counter = 0;
5021                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5022                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5023                         if (tp->serdes_counter)
5024                                 tp->serdes_counter--;
5025                         else {
5026                                 if (workaround) {
5027                                         u32 val = serdes_cfg;
5028
5029                                         if (port_a)
5030                                                 val |= 0xc010000;
5031                                         else
5032                                                 val |= 0x4010000;
5033
5034                                         tw32_f(MAC_SERDES_CFG, val);
5035                                 }
5036
5037                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5038                                 udelay(40);
5039
5040                                 /* Link parallel detection - the link is up
5041                                  * only if we have PCS_SYNC and are not
5042                                  * receiving config code words. */
5043                                 mac_status = tr32(MAC_STATUS);
5044                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5045                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5046                                         tg3_setup_flow_control(tp, 0, 0);
5047                                         current_link_up = 1;
5048                                         tp->phy_flags |=
5049                                                 TG3_PHYFLG_PARALLEL_DETECT;
5050                                         tp->serdes_counter =
5051                                                 SERDES_PARALLEL_DET_TIMEOUT;
5052                                 } else
5053                                         goto restart_autoneg;
5054                         }
5055                 }
5056         } else {
5057                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5058                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5059         }
5060
5061 out:
5062         return current_link_up;
5063 }
5064
5065 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5066 {
5067         int current_link_up = 0;
5068
5069         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5070                 goto out;
5071
5072         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5073                 u32 txflags, rxflags;
5074                 int i;
5075
5076                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5077                         u32 local_adv = 0, remote_adv = 0;
5078
5079                         if (txflags & ANEG_CFG_PS1)
5080                                 local_adv |= ADVERTISE_1000XPAUSE;
5081                         if (txflags & ANEG_CFG_PS2)
5082                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5083
5084                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5085                                 remote_adv |= LPA_1000XPAUSE;
5086                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5087                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5088
5089                         tp->link_config.rmt_adv =
5090                                            mii_adv_to_ethtool_adv_x(remote_adv);
5091
5092                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5093
5094                         current_link_up = 1;
5095                 }
5096                 for (i = 0; i < 30; i++) {
5097                         udelay(20);
5098                         tw32_f(MAC_STATUS,
5099                                (MAC_STATUS_SYNC_CHANGED |
5100                                 MAC_STATUS_CFG_CHANGED));
5101                         udelay(40);
5102                         if ((tr32(MAC_STATUS) &
5103                              (MAC_STATUS_SYNC_CHANGED |
5104                               MAC_STATUS_CFG_CHANGED)) == 0)
5105                                 break;
5106                 }
5107
5108                 mac_status = tr32(MAC_STATUS);
5109                 if (current_link_up == 0 &&
5110                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5111                     !(mac_status & MAC_STATUS_RCVD_CFG))
5112                         current_link_up = 1;
5113         } else {
5114                 tg3_setup_flow_control(tp, 0, 0);
5115
5116                 /* Forcing 1000FD link up. */
5117                 current_link_up = 1;
5118
5119                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5120                 udelay(40);
5121
5122                 tw32_f(MAC_MODE, tp->mac_mode);
5123                 udelay(40);
5124         }
5125
5126 out:
5127         return current_link_up;
5128 }
5129
5130 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5131 {
5132         u32 orig_pause_cfg;
5133         u16 orig_active_speed;
5134         u8 orig_active_duplex;
5135         u32 mac_status;
5136         int current_link_up;
5137         int i;
5138
5139         orig_pause_cfg = tp->link_config.active_flowctrl;
5140         orig_active_speed = tp->link_config.active_speed;
5141         orig_active_duplex = tp->link_config.active_duplex;
5142
5143         if (!tg3_flag(tp, HW_AUTONEG) &&
5144             tp->link_up &&
5145             tg3_flag(tp, INIT_COMPLETE)) {
5146                 mac_status = tr32(MAC_STATUS);
5147                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5148                                MAC_STATUS_SIGNAL_DET |
5149                                MAC_STATUS_CFG_CHANGED |
5150                                MAC_STATUS_RCVD_CFG);
5151                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5152                                    MAC_STATUS_SIGNAL_DET)) {
5153                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5154                                             MAC_STATUS_CFG_CHANGED));
5155                         return 0;
5156                 }
5157         }
5158
5159         tw32_f(MAC_TX_AUTO_NEG, 0);
5160
5161         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5162         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5163         tw32_f(MAC_MODE, tp->mac_mode);
5164         udelay(40);
5165
5166         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5167                 tg3_init_bcm8002(tp);
5168
5169         /* Enable link change events even when polling the serdes. */
5170         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5171         udelay(40);
5172
5173         current_link_up = 0;
5174         tp->link_config.rmt_adv = 0;
5175         mac_status = tr32(MAC_STATUS);
5176
5177         if (tg3_flag(tp, HW_AUTONEG))
5178                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5179         else
5180                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5181
5182         tp->napi[0].hw_status->status =
5183                 (SD_STATUS_UPDATED |
5184                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5185
5186         for (i = 0; i < 100; i++) {
5187                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5188                                     MAC_STATUS_CFG_CHANGED));
5189                 udelay(5);
5190                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5191                                          MAC_STATUS_CFG_CHANGED |
5192                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5193                         break;
5194         }
5195
5196         mac_status = tr32(MAC_STATUS);
5197         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5198                 current_link_up = 0;
5199                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5200                     tp->serdes_counter == 0) {
5201                         tw32_f(MAC_MODE, (tp->mac_mode |
5202                                           MAC_MODE_SEND_CONFIGS));
5203                         udelay(1);
5204                         tw32_f(MAC_MODE, tp->mac_mode);
5205                 }
5206         }
5207
5208         if (current_link_up == 1) {
5209                 tp->link_config.active_speed = SPEED_1000;
5210                 tp->link_config.active_duplex = DUPLEX_FULL;
5211                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5212                                     LED_CTRL_LNKLED_OVERRIDE |
5213                                     LED_CTRL_1000MBPS_ON));
5214         } else {
5215                 tp->link_config.active_speed = SPEED_UNKNOWN;
5216                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5217                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5218                                     LED_CTRL_LNKLED_OVERRIDE |
5219                                     LED_CTRL_TRAFFIC_OVERRIDE));
5220         }
5221
5222         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5223                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5224                 if (orig_pause_cfg != now_pause_cfg ||
5225                     orig_active_speed != tp->link_config.active_speed ||
5226                     orig_active_duplex != tp->link_config.active_duplex)
5227                         tg3_link_report(tp);
5228         }
5229
5230         return 0;
5231 }
5232
5233 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5234 {
5235         int current_link_up, err = 0;
5236         u32 bmsr, bmcr;
5237         u16 current_speed;
5238         u8 current_duplex;
5239         u32 local_adv, remote_adv;
5240
5241         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5242         tw32_f(MAC_MODE, tp->mac_mode);
5243         udelay(40);
5244
5245         tw32(MAC_EVENT, 0);
5246
5247         tw32_f(MAC_STATUS,
5248              (MAC_STATUS_SYNC_CHANGED |
5249               MAC_STATUS_CFG_CHANGED |
5250               MAC_STATUS_MI_COMPLETION |
5251               MAC_STATUS_LNKSTATE_CHANGED));
5252         udelay(40);
5253
5254         if (force_reset)
5255                 tg3_phy_reset(tp);
5256
5257         current_link_up = 0;
5258         current_speed = SPEED_UNKNOWN;
5259         current_duplex = DUPLEX_UNKNOWN;
5260         tp->link_config.rmt_adv = 0;
5261
5262         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5263         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5264         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5265                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5266                         bmsr |= BMSR_LSTATUS;
5267                 else
5268                         bmsr &= ~BMSR_LSTATUS;
5269         }
5270
5271         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5272
5273         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5274             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5275                 /* do nothing, just check for link up at the end */
5276         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5277                 u32 adv, newadv;
5278
5279                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5280                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5281                                  ADVERTISE_1000XPAUSE |
5282                                  ADVERTISE_1000XPSE_ASYM |
5283                                  ADVERTISE_SLCT);
5284
5285                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5286                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5287
5288                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5289                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5290                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5291                         tg3_writephy(tp, MII_BMCR, bmcr);
5292
5293                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5294                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5295                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5296
5297                         return err;
5298                 }
5299         } else {
5300                 u32 new_bmcr;
5301
5302                 bmcr &= ~BMCR_SPEED1000;
5303                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5304
5305                 if (tp->link_config.duplex == DUPLEX_FULL)
5306                         new_bmcr |= BMCR_FULLDPLX;
5307
5308                 if (new_bmcr != bmcr) {
5309                         /* BMCR_SPEED1000 is a reserved bit that needs
5310                          * to be set on write.
5311                          */
5312                         new_bmcr |= BMCR_SPEED1000;
5313
5314                         /* Force a linkdown */
5315                         if (tp->link_up) {
5316                                 u32 adv;
5317
5318                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5319                                 adv &= ~(ADVERTISE_1000XFULL |
5320                                          ADVERTISE_1000XHALF |
5321                                          ADVERTISE_SLCT);
5322                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5323                                 tg3_writephy(tp, MII_BMCR, bmcr |
5324                                                            BMCR_ANRESTART |
5325                                                            BMCR_ANENABLE);
5326                                 udelay(10);
5327                                 tg3_carrier_off(tp);
5328                         }
5329                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5330                         bmcr = new_bmcr;
5331                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5332                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5333                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5334                             ASIC_REV_5714) {
5335                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5336                                         bmsr |= BMSR_LSTATUS;
5337                                 else
5338                                         bmsr &= ~BMSR_LSTATUS;
5339                         }
5340                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5341                 }
5342         }
5343
5344         if (bmsr & BMSR_LSTATUS) {
5345                 current_speed = SPEED_1000;
5346                 current_link_up = 1;
5347                 if (bmcr & BMCR_FULLDPLX)
5348                         current_duplex = DUPLEX_FULL;
5349                 else
5350                         current_duplex = DUPLEX_HALF;
5351
5352                 local_adv = 0;
5353                 remote_adv = 0;
5354
5355                 if (bmcr & BMCR_ANENABLE) {
5356                         u32 common;
5357
5358                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5359                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5360                         common = local_adv & remote_adv;
5361                         if (common & (ADVERTISE_1000XHALF |
5362                                       ADVERTISE_1000XFULL)) {
5363                                 if (common & ADVERTISE_1000XFULL)
5364                                         current_duplex = DUPLEX_FULL;
5365                                 else
5366                                         current_duplex = DUPLEX_HALF;
5367
5368                                 tp->link_config.rmt_adv =
5369                                            mii_adv_to_ethtool_adv_x(remote_adv);
5370                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5371                                 /* Link is up via parallel detect */
5372                         } else {
5373                                 current_link_up = 0;
5374                         }
5375                 }
5376         }
5377
5378         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5379                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5380
5381         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5382         if (tp->link_config.active_duplex == DUPLEX_HALF)
5383                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5384
5385         tw32_f(MAC_MODE, tp->mac_mode);
5386         udelay(40);
5387
5388         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5389
5390         tp->link_config.active_speed = current_speed;
5391         tp->link_config.active_duplex = current_duplex;
5392
5393         tg3_test_and_report_link_chg(tp, current_link_up);
5394         return err;
5395 }
5396
5397 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5398 {
5399         if (tp->serdes_counter) {
5400                 /* Give autoneg time to complete. */
5401                 tp->serdes_counter--;
5402                 return;
5403         }
5404
5405         if (!tp->link_up &&
5406             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5407                 u32 bmcr;
5408
5409                 tg3_readphy(tp, MII_BMCR, &bmcr);
5410                 if (bmcr & BMCR_ANENABLE) {
5411                         u32 phy1, phy2;
5412
5413                         /* Select shadow register 0x1f */
5414                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5415                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5416
5417                         /* Select expansion interrupt status register */
5418                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5419                                          MII_TG3_DSP_EXP1_INT_STAT);
5420                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5421                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5422
5423                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5424                                 /* We have signal detect and are not receiving
5425                                  * config code words, so the link is up by
5426                                  * parallel detection.
5427                                  */
5428
5429                                 bmcr &= ~BMCR_ANENABLE;
5430                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5431                                 tg3_writephy(tp, MII_BMCR, bmcr);
5432                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5433                         }
5434                 }
5435         } else if (tp->link_up &&
5436                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5437                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5438                 u32 phy2;
5439
5440                 /* Select expansion interrupt status register */
5441                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5442                                  MII_TG3_DSP_EXP1_INT_STAT);
5443                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5444                 if (phy2 & 0x20) {
5445                         u32 bmcr;
5446
5447                         /* Config code words received, turn on autoneg. */
5448                         tg3_readphy(tp, MII_BMCR, &bmcr);
5449                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5450
5451                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5452
5453                 }
5454         }
5455 }
5456
5457 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5458 {
5459         u32 val;
5460         int err;
5461
5462         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5463                 err = tg3_setup_fiber_phy(tp, force_reset);
5464         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5465                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5466         else
5467                 err = tg3_setup_copper_phy(tp, force_reset);
5468
5469         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5470                 u32 scale;
5471
5472                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5473                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5474                         scale = 65;
5475                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5476                         scale = 6;
5477                 else
5478                         scale = 12;
5479
5480                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5481                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5482                 tw32(GRC_MISC_CFG, val);
5483         }
5484
5485         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5486               (6 << TX_LENGTHS_IPG_SHIFT);
5487         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5488                 val |= tr32(MAC_TX_LENGTHS) &
5489                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5490                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5491
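        /* 1000 Mb/s half duplex uses carrier extension, which needs the
         * long slot time programmed below; all other modes use the
         * standard slot time.
         */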
5492         if (tp->link_config.active_speed == SPEED_1000 &&
5493             tp->link_config.active_duplex == DUPLEX_HALF)
5494                 tw32(MAC_TX_LENGTHS, val |
5495                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5496         else
5497                 tw32(MAC_TX_LENGTHS, val |
5498                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5499
5500         if (!tg3_flag(tp, 5705_PLUS)) {
5501                 if (tp->link_up) {
5502                         tw32(HOSTCC_STAT_COAL_TICKS,
5503                              tp->coal.stats_block_coalesce_usecs);
5504                 } else {
5505                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5506                 }
5507         }
5508
5509         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5510                 val = tr32(PCIE_PWR_MGMT_THRESH);
5511                 if (!tp->link_up)
5512                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5513                               tp->pwrmgmt_thresh;
5514                 else
5515                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5516                 tw32(PCIE_PWR_MGMT_THRESH, val);
5517         }
5518
5519         return err;
5520 }
5521
5522 /* tp->lock must be held */
5523 static u64 tg3_refclk_read(struct tg3 *tp)
5524 {
5525         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
5526         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
5527 }
5528
5529 /* tp->lock must be held */
5530 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5531 {
5532         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5533         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5534         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5535         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5536 }
5537
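/* Forward declarations: the PTP callbacks below take the full device
 * lock, whose helpers are defined later in the file.
 */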
5538 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
5539 static inline void tg3_full_unlock(struct tg3 *tp);
5540 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
5541 {
5542         struct tg3 *tp = netdev_priv(dev);
5543
5544         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5545                                 SOF_TIMESTAMPING_RX_SOFTWARE |
5546                                 SOF_TIMESTAMPING_SOFTWARE    |
5547                                 SOF_TIMESTAMPING_TX_HARDWARE |
5548                                 SOF_TIMESTAMPING_RX_HARDWARE |
5549                                 SOF_TIMESTAMPING_RAW_HARDWARE;
5550
5551         if (tp->ptp_clock)
5552                 info->phc_index = ptp_clock_index(tp->ptp_clock);
5553         else
5554                 info->phc_index = -1;
5555
5556         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
5557
5558         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
5559                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
5560                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
5561                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
5562         return 0;
5563 }
5564
5565 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
5566 {
5567         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5568         bool neg_adj = false;
5569         u32 correction = 0;
5570
5571         if (ppb < 0) {
5572                 neg_adj = true;
5573                 ppb = -ppb;
5574         }
5575
5576         /* Frequency adjustment is performed using hardware with a 24 bit
5577          * accumulator and a programmable correction value. On each clk, the
5578          * correction value gets added to the accumulator and when it
5579          * overflows, the time counter is incremented/decremented.
5580          *
5581          * So conversion from ppb to correction value is
5582          *              ppb * (1 << 24) / 1000000000
5583          */
5584         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
5585                      TG3_EAV_REF_CLK_CORRECT_MASK;
5586
5587         tg3_full_lock(tp, 0);
5588
5589         if (correction)
5590                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
5591                      TG3_EAV_REF_CLK_CORRECT_EN |
5592                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
5593         else
5594                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
5595
5596         tg3_full_unlock(tp);
5597
5598         return 0;
5599 }
5600
5601 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
5602 {
5603         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5604
5605         tg3_full_lock(tp, 0);
5606         tp->ptp_adjust += delta;
5607         tg3_full_unlock(tp);
5608
5609         return 0;
5610 }
5611
5612 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
5613 {
5614         u64 ns;
5615         u32 remainder;
5616         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5617
5618         tg3_full_lock(tp, 0);
5619         ns = tg3_refclk_read(tp);
5620         ns += tp->ptp_adjust;
5621         tg3_full_unlock(tp);
5622
5623         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
5624         ts->tv_nsec = remainder;
5625
5626         return 0;
5627 }
5628
5629 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
5630                            const struct timespec *ts)
5631 {
5632         u64 ns;
5633         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
5634
5635         ns = timespec_to_ns(ts);
5636
5637         tg3_full_lock(tp, 0);
5638         tg3_refclk_write(tp, ns);
5639         tp->ptp_adjust = 0;
5640         tg3_full_unlock(tp);
5641
5642         return 0;
5643 }
5644
5645 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
5646                           struct ptp_clock_request *rq, int on)
5647 {
5648         return -EOPNOTSUPP;
5649 }
5650
5651 static const struct ptp_clock_info tg3_ptp_caps = {
5652         .owner          = THIS_MODULE,
5653         .name           = "tg3 clock",
5654         .max_adj        = 250000000,
5655         .n_alarm        = 0,
5656         .n_ext_ts       = 0,
5657         .n_per_out      = 0,
5658         .pps            = 0,
5659         .adjfreq        = tg3_ptp_adjfreq,
5660         .adjtime        = tg3_ptp_adjtime,
5661         .gettime        = tg3_ptp_gettime,
5662         .settime        = tg3_ptp_settime,
5663         .enable         = tg3_ptp_enable,
5664 };
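
/* tg3_ptp_init() below copies these caps into tp->ptp_info; the clock
 * itself is registered elsewhere in the driver, along the lines of:
 *
 *      tp->ptp_clock = ptp_clock_register(&tp->ptp_info, &tp->pdev->dev);
 */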
5665
5666 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
5667                                      struct skb_shared_hwtstamps *timestamp)
5668 {
5669         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
5670         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
5671                                            tp->ptp_adjust);
5672 }
5673
5674 /* tp->lock must be held */
5675 static void tg3_ptp_init(struct tg3 *tp)
5676 {
5677         if (!tg3_flag(tp, PTP_CAPABLE))
5678                 return;
5679
5680         /* Initialize the hardware clock to the system time. */
5681         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5682         tp->ptp_adjust = 0;
5683         tp->ptp_info = tg3_ptp_caps;
5684 }
5685
5686 /* tp->lock must be held */
5687 static void tg3_ptp_resume(struct tg3 *tp)
5688 {
5689         if (!tg3_flag(tp, PTP_CAPABLE))
5690                 return;
5691
5692         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5693         tp->ptp_adjust = 0;
5694 }
5695
5696 static void tg3_ptp_fini(struct tg3 *tp)
5697 {
5698         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5699                 return;
5700
5701         ptp_clock_unregister(tp->ptp_clock);
5702         tp->ptp_clock = NULL;
5703         tp->ptp_adjust = 0;
5704 }
5705
5706 static inline int tg3_irq_sync(struct tg3 *tp)
5707 {
5708         return tp->irq_sync;
5709 }
5710
5711 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5712 {
5713         int i;
5714
5715         dst = (u32 *)((u8 *)dst + off);
5716         for (i = 0; i < len; i += sizeof(u32))
5717                 *dst++ = tr32(off + i);
5718 }
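
     /* Illustrative call (hypothetical offsets, not from the original
      * source): tg3_rd32_loop(tp, regs, 0x400, 0x10) first advances dst to
      * &regs[0x400 / 4] and then copies the four registers at 0x400, 0x404,
      * 0x408 and 0x40c, so every register lands in the dump buffer at the
      * same byte offset it occupies in the device's register space.
      */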
5719
5720 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5721 {
5722         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5723         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5724         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5725         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5726         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5727         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5728         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5729         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5730         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5731         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5732         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5733         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5734         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5735         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5736         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5737         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5738         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5739         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5740         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5741
5742         if (tg3_flag(tp, SUPPORT_MSIX))
5743                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5744
5745         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5746         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5747         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5748         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5749         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5750         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5751         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5752         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5753
5754         if (!tg3_flag(tp, 5705_PLUS)) {
5755                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5756                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5757                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5758         }
5759
5760         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5761         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5762         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5763         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5764         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5765
5766         if (tg3_flag(tp, NVRAM))
5767                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5768 }
5769
5770 static void tg3_dump_state(struct tg3 *tp)
5771 {
5772         int i;
5773         u32 *regs;
5774
5775         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5776         if (!regs) {
5777                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5778                 return;
5779         }
5780
5781         if (tg3_flag(tp, PCI_EXPRESS)) {
5782                 /* Read up to but not including private PCI registers */
5783                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5784                         regs[i / sizeof(u32)] = tr32(i);
5785         } else
5786                 tg3_dump_legacy_regs(tp, regs);
5787
5788         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5789                 if (!regs[i + 0] && !regs[i + 1] &&
5790                     !regs[i + 2] && !regs[i + 3])
5791                         continue;
5792
5793                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5794                            i * 4,
5795                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5796         }
5797
5798         kfree(regs);
5799
5800         for (i = 0; i < tp->irq_cnt; i++) {
5801                 struct tg3_napi *tnapi = &tp->napi[i];
5802
5803                 /* SW status block */
5804                 netdev_err(tp->dev,
5805                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5806                            i,
5807                            tnapi->hw_status->status,
5808                            tnapi->hw_status->status_tag,
5809                            tnapi->hw_status->rx_jumbo_consumer,
5810                            tnapi->hw_status->rx_consumer,
5811                            tnapi->hw_status->rx_mini_consumer,
5812                            tnapi->hw_status->idx[0].rx_producer,
5813                            tnapi->hw_status->idx[0].tx_consumer);
5814
5815                 netdev_err(tp->dev,
5816                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5817                            i,
5818                            tnapi->last_tag, tnapi->last_irq_tag,
5819                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5820                            tnapi->rx_rcb_ptr,
5821                            tnapi->prodring.rx_std_prod_idx,
5822                            tnapi->prodring.rx_std_cons_idx,
5823                            tnapi->prodring.rx_jmb_prod_idx,
5824                            tnapi->prodring.rx_jmb_cons_idx);
5825         }
5826 }
5827
5828 /* This is called whenever we suspect that the system chipset is re-
5829  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5830  * is bogus tx completions. We try to recover by setting the
5831  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5832  * in the workqueue.
5833  */
5834 static void tg3_tx_recover(struct tg3 *tp)
5835 {
5836         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5837                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5838
5839         netdev_warn(tp->dev,
5840                     "The system may be re-ordering memory-mapped I/O "
5841                     "cycles to the network device, attempting to recover. "
5842                     "Please report the problem to the driver maintainer "
5843                     "and include system chipset information.\n");
5844
5845         spin_lock(&tp->lock);
5846         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5847         spin_unlock(&tp->lock);
5848 }
5849
5850 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5851 {
5852         /* Tell compiler to fetch tx indices from memory. */
5853         barrier();
5854         return tnapi->tx_pending -
5855                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5856 }
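
     /* Worked example (illustrative), assuming TG3_TX_RING_SIZE == 512:
      * with tx_cons = 510 and tx_prod = 5 after a wrap, (5 - 510) & 511
      * evaluates to 7, i.e. seven descriptors (510, 511, 0..4) are still
      * in flight, and tg3_tx_avail() returns tx_pending - 7.
      */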
5857
5858 /* Tigon3 never reports partial packet sends.  So we do not
5859  * need special logic to handle SKBs that have not had all
5860  * of their frags sent yet, like SunGEM does.
5861  */
5862 static void tg3_tx(struct tg3_napi *tnapi)
5863 {
5864         struct tg3 *tp = tnapi->tp;
5865         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5866         u32 sw_idx = tnapi->tx_cons;
5867         struct netdev_queue *txq;
5868         int index = tnapi - tp->napi;
5869         unsigned int pkts_compl = 0, bytes_compl = 0;
5870
5871         if (tg3_flag(tp, ENABLE_TSS))
5872                 index--;
5873
5874         txq = netdev_get_tx_queue(tp->dev, index);
5875
5876         while (sw_idx != hw_idx) {
5877                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5878                 struct sk_buff *skb = ri->skb;
5879                 int i, tx_bug = 0;
5880
5881                 if (unlikely(skb == NULL)) {
5882                         tg3_tx_recover(tp);
5883                         return;
5884                 }
5885
5886                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
5887                         struct skb_shared_hwtstamps timestamp;
5888                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
5889                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
5890
5891                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
5892
5893                         skb_tstamp_tx(skb, &timestamp);
5894                 }
5895
5896                 pci_unmap_single(tp->pdev,
5897                                  dma_unmap_addr(ri, mapping),
5898                                  skb_headlen(skb),
5899                                  PCI_DMA_TODEVICE);
5900
5901                 ri->skb = NULL;
5902
5903                 while (ri->fragmented) {
5904                         ri->fragmented = false;
5905                         sw_idx = NEXT_TX(sw_idx);
5906                         ri = &tnapi->tx_buffers[sw_idx];
5907                 }
5908
5909                 sw_idx = NEXT_TX(sw_idx);
5910
5911                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5912                         ri = &tnapi->tx_buffers[sw_idx];
5913                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5914                                 tx_bug = 1;
5915
5916                         pci_unmap_page(tp->pdev,
5917                                        dma_unmap_addr(ri, mapping),
5918                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5919                                        PCI_DMA_TODEVICE);
5920
5921                         while (ri->fragmented) {
5922                                 ri->fragmented = false;
5923                                 sw_idx = NEXT_TX(sw_idx);
5924                                 ri = &tnapi->tx_buffers[sw_idx];
5925                         }
5926
5927                         sw_idx = NEXT_TX(sw_idx);
5928                 }
5929
5930                 pkts_compl++;
5931                 bytes_compl += skb->len;
5932
5933                 dev_kfree_skb(skb);
5934
5935                 if (unlikely(tx_bug)) {
5936                         tg3_tx_recover(tp);
5937                         return;
5938                 }
5939         }
5940
5941         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5942
5943         tnapi->tx_cons = sw_idx;
5944
5945         /* Need to make the tx_cons update visible to tg3_start_xmit()
5946          * before checking for netif_queue_stopped().  Without the
5947          * memory barrier, there is a small possibility that tg3_start_xmit()
5948          * will miss it and cause the queue to be stopped forever.
5949          */
5950         smp_mb();
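
             /* Illustrative interleaving of the race described above (not
              * from the original source): without the barrier, this CPU
              * could read netif_tx_queue_stopped() as false before its
              * tx_cons store is visible; meanwhile tg3_start_xmit() on
              * another CPU still sees the old tx_cons, finds no room, and
              * stops the queue just afterwards, leaving a stopped queue
              * that nothing ever wakes.
              */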
5951
5952         if (unlikely(netif_tx_queue_stopped(txq) &&
5953                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5954                 __netif_tx_lock(txq, smp_processor_id());
5955                 if (netif_tx_queue_stopped(txq) &&
5956                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5957                         netif_tx_wake_queue(txq);
5958                 __netif_tx_unlock(txq);
5959         }
5960 }
5961
5962 static void tg3_frag_free(bool is_frag, void *data)
5963 {
5964         if (is_frag)
5965                 put_page(virt_to_head_page(data));
5966         else
5967                 kfree(data);
5968 }
5969
5970 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5971 {
5972         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5973                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5974
5975         if (!ri->data)
5976                 return;
5977
5978         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5979                          map_sz, PCI_DMA_FROMDEVICE);
5980         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5981         ri->data = NULL;
5982 }
5983
5984
5985 /* Returns size of skb allocated or < 0 on error.
5986  *
5987  * We only need to fill in the address because the other members
5988  * of the RX descriptor are invariant; see tg3_init_rings.
5989  *
5990  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5991  * posting buffers we only dirty the first cache line of the RX
5992  * descriptor (containing the address).  Whereas for the RX status
5993  * buffers the cpu only reads the last cacheline of the RX descriptor
5994  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5995  */
5996 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5997                              u32 opaque_key, u32 dest_idx_unmasked,
5998                              unsigned int *frag_size)
5999 {
6000         struct tg3_rx_buffer_desc *desc;
6001         struct ring_info *map;
6002         u8 *data;
6003         dma_addr_t mapping;
6004         int skb_size, data_size, dest_idx;
6005
6006         switch (opaque_key) {
6007         case RXD_OPAQUE_RING_STD:
6008                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6009                 desc = &tpr->rx_std[dest_idx];
6010                 map = &tpr->rx_std_buffers[dest_idx];
6011                 data_size = tp->rx_pkt_map_sz;
6012                 break;
6013
6014         case RXD_OPAQUE_RING_JUMBO:
6015                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6016                 desc = &tpr->rx_jmb[dest_idx].std;
6017                 map = &tpr->rx_jmb_buffers[dest_idx];
6018                 data_size = TG3_RX_JMB_MAP_SZ;
6019                 break;
6020
6021         default:
6022                 return -EINVAL;
6023         }
6024
6025         /* Do not overwrite any of the map or rp information
6026          * until we are sure we can commit to a new buffer.
6027          *
6028          * Callers depend upon this behavior and assume that
6029          * we leave everything unchanged if we fail.
6030          */
6031         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6032                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6033         if (skb_size <= PAGE_SIZE) {
6034                 data = netdev_alloc_frag(skb_size);
6035                 *frag_size = skb_size;
6036         } else {
6037                 data = kmalloc(skb_size, GFP_ATOMIC);
6038                 *frag_size = 0;
6039         }
6040         if (!data)
6041                 return -ENOMEM;
6042
6043         mapping = pci_map_single(tp->pdev,
6044                                  data + TG3_RX_OFFSET(tp),
6045                                  data_size,
6046                                  PCI_DMA_FROMDEVICE);
6047         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6048                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6049                 return -EIO;
6050         }
6051
6052         map->data = data;
6053         dma_unmap_addr_set(map, mapping, mapping);
6054
6055         desc->addr_hi = ((u64)mapping >> 32);
6056         desc->addr_lo = ((u64)mapping & 0xffffffff);
6057
6058         return data_size;
6059 }
6060
6061 /* We only need to copy the address over because the other
6062  * members of the RX descriptor are invariant.  See notes above
6063  * tg3_alloc_rx_data for full details.
6064  */
6065 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6066                            struct tg3_rx_prodring_set *dpr,
6067                            u32 opaque_key, int src_idx,
6068                            u32 dest_idx_unmasked)
6069 {
6070         struct tg3 *tp = tnapi->tp;
6071         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6072         struct ring_info *src_map, *dest_map;
6073         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6074         int dest_idx;
6075
6076         switch (opaque_key) {
6077         case RXD_OPAQUE_RING_STD:
6078                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6079                 dest_desc = &dpr->rx_std[dest_idx];
6080                 dest_map = &dpr->rx_std_buffers[dest_idx];
6081                 src_desc = &spr->rx_std[src_idx];
6082                 src_map = &spr->rx_std_buffers[src_idx];
6083                 break;
6084
6085         case RXD_OPAQUE_RING_JUMBO:
6086                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6087                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6088                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6089                 src_desc = &spr->rx_jmb[src_idx].std;
6090                 src_map = &spr->rx_jmb_buffers[src_idx];
6091                 break;
6092
6093         default:
6094                 return;
6095         }
6096
6097         dest_map->data = src_map->data;
6098         dma_unmap_addr_set(dest_map, mapping,
6099                            dma_unmap_addr(src_map, mapping));
6100         dest_desc->addr_hi = src_desc->addr_hi;
6101         dest_desc->addr_lo = src_desc->addr_lo;
6102
6103         /* Ensure that the update to the skb happens after the physical
6104          * addresses have been transferred to the new BD location.
6105          */
6106         smp_wmb();
6107
6108         src_map->data = NULL;
6109 }
6110
6111 /* The RX ring scheme is composed of multiple rings which post fresh
6112  * buffers to the chip, and one special ring the chip uses to report
6113  * status back to the host.
6114  *
6115  * The special ring reports the status of received packets to the
6116  * host.  The chip does not write into the original descriptor the
6117  * RX buffer was obtained from.  The chip simply takes the original
6118  * descriptor as provided by the host, updates the status and length
6119  * field, then writes this into the next status ring entry.
6120  *
6121  * Each ring the host uses to post buffers to the chip is described
6122  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
6123  * it is first placed into the on-chip RAM.  When the packet's length
6124  * is known, the chip walks down the TG3_BDINFO entries to select the
6125  * ring: the first TG3_BDINFO whose MAXLEN field covers the new
6126  * packet's length is chosen.
6127  *
6128  * The "separate ring for rx status" scheme may seem odd, but it makes
6129  * sense from a cache coherency perspective.  If only the host writes
6130  * to the buffer post rings, and only the chip writes to the rx status
6131  * rings, then cache lines never move beyond shared-modified state.
6132  * If both the host and chip were to write into the same ring, cache line
6133  * eviction could occur since both entities want it in an exclusive state.
6134  */
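     /* A compressed sketch of the flow described above (illustrative):
      *
      *   host: fills rx_std/rx_jmb descriptors, bumps the producer mailbox
      *   chip: DMAs the packet into the posted buffer, then writes status,
      *         length and the original opaque cookie into the next entry
      *         of the rx status (return) ring
      *   host: tg3_rx() reads the status entry, locates the buffer via the
      *         opaque cookie, and posts a fresh buffer in its place
      */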
6135 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6136 {
6137         struct tg3 *tp = tnapi->tp;
6138         u32 work_mask, rx_std_posted = 0;
6139         u32 std_prod_idx, jmb_prod_idx;
6140         u32 sw_idx = tnapi->rx_rcb_ptr;
6141         u16 hw_idx;
6142         int received;
6143         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6144
6145         hw_idx = *(tnapi->rx_rcb_prod_idx);
6146         /*
6147          * We need to order the read of hw_idx and the read of
6148          * the opaque cookie.
6149          */
6150         rmb();
6151         work_mask = 0;
6152         received = 0;
6153         std_prod_idx = tpr->rx_std_prod_idx;
6154         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6155         while (sw_idx != hw_idx && budget > 0) {
6156                 struct ring_info *ri;
6157                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6158                 unsigned int len;
6159                 struct sk_buff *skb;
6160                 dma_addr_t dma_addr;
6161                 u32 opaque_key, desc_idx, *post_ptr;
6162                 u8 *data;
6163                 u64 tstamp = 0;
6164
6165                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6166                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6167                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6168                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6169                         dma_addr = dma_unmap_addr(ri, mapping);
6170                         data = ri->data;
6171                         post_ptr = &std_prod_idx;
6172                         rx_std_posted++;
6173                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6174                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6175                         dma_addr = dma_unmap_addr(ri, mapping);
6176                         data = ri->data;
6177                         post_ptr = &jmb_prod_idx;
6178                 } else
6179                         goto next_pkt_nopost;
6180
6181                 work_mask |= opaque_key;
6182
6183                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6184                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6185                 drop_it:
6186                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6187                                        desc_idx, *post_ptr);
6188                 drop_it_no_recycle:
6189                         /* Other statistics kept track of by card. */
6190                         tp->rx_dropped++;
6191                         goto next_pkt;
6192                 }
6193
6194                 prefetch(data + TG3_RX_OFFSET(tp));
6195                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6196                       ETH_FCS_LEN;
6197
6198                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6199                      RXD_FLAG_PTPSTAT_PTPV1 ||
6200                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6201                      RXD_FLAG_PTPSTAT_PTPV2) {
6202                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6203                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6204                 }
6205
6206                 if (len > TG3_RX_COPY_THRESH(tp)) {
6207                         int skb_size;
6208                         unsigned int frag_size;
6209
6210                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6211                                                     *post_ptr, &frag_size);
6212                         if (skb_size < 0)
6213                                 goto drop_it;
6214
6215                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6216                                          PCI_DMA_FROMDEVICE);
6217
6218                         skb = build_skb(data, frag_size);
6219                         if (!skb) {
6220                                 tg3_frag_free(frag_size != 0, data);
6221                                 goto drop_it_no_recycle;
6222                         }
6223                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6224                         /* Ensure that the update to the data happens
6225                          * after the usage of the old DMA mapping.
6226                          */
6227                         smp_wmb();
6228
6229                         ri->data = NULL;
6230
6231                 } else {
6232                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6233                                        desc_idx, *post_ptr);
6234
6235                         skb = netdev_alloc_skb(tp->dev,
6236                                                len + TG3_RAW_IP_ALIGN);
6237                         if (skb == NULL)
6238                                 goto drop_it_no_recycle;
6239
6240                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6241                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6242                         memcpy(skb->data,
6243                                data + TG3_RX_OFFSET(tp),
6244                                len);
6245                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6246                 }
6247
6248                 skb_put(skb, len);
6249                 if (tstamp)
6250                         tg3_hwclock_to_timestamp(tp, tstamp,
6251                                                  skb_hwtstamps(skb));
6252
6253                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6254                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6255                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6256                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6257                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6258                 else
6259                         skb_checksum_none_assert(skb);
6260
6261                 skb->protocol = eth_type_trans(skb, tp->dev);
6262
6263                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6264                     skb->protocol != htons(ETH_P_8021Q)) {
6265                         dev_kfree_skb(skb);
6266                         goto drop_it_no_recycle;
6267                 }
6268
6269                 if (desc->type_flags & RXD_FLAG_VLAN &&
6270                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6271                         __vlan_hwaccel_put_tag(skb,
6272                                                desc->err_vlan & RXD_VLAN_MASK);
6273
6274                 napi_gro_receive(&tnapi->napi, skb);
6275
6276                 received++;
6277                 budget--;
6278
6279 next_pkt:
6280                 (*post_ptr)++;
6281
6282                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6283                         tpr->rx_std_prod_idx = std_prod_idx &
6284                                                tp->rx_std_ring_mask;
6285                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6286                                      tpr->rx_std_prod_idx);
6287                         work_mask &= ~RXD_OPAQUE_RING_STD;
6288                         rx_std_posted = 0;
6289                 }
6290 next_pkt_nopost:
6291                 sw_idx++;
6292                 sw_idx &= tp->rx_ret_ring_mask;
6293
6294                 /* Refresh hw_idx to see if there is new work */
6295                 if (sw_idx == hw_idx) {
6296                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6297                         rmb();
6298                 }
6299         }
6300
6301         /* ACK the status ring. */
6302         tnapi->rx_rcb_ptr = sw_idx;
6303         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6304
6305         /* Refill RX ring(s). */
6306         if (!tg3_flag(tp, ENABLE_RSS)) {
6307                 /* Sync BD data before updating mailbox */
6308                 wmb();
6309
6310                 if (work_mask & RXD_OPAQUE_RING_STD) {
6311                         tpr->rx_std_prod_idx = std_prod_idx &
6312                                                tp->rx_std_ring_mask;
6313                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6314                                      tpr->rx_std_prod_idx);
6315                 }
6316                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6317                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6318                                                tp->rx_jmb_ring_mask;
6319                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6320                                      tpr->rx_jmb_prod_idx);
6321                 }
6322                 mmiowb();
6323         } else if (work_mask) {
6324                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6325                  * updated before the producer indices can be updated.
6326                  */
6327                 smp_wmb();
6328
6329                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6330                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6331
6332                 if (tnapi != &tp->napi[1]) {
6333                         tp->rx_refill = true;
6334                         napi_schedule(&tp->napi[1].napi);
6335                 }
6336         }
6337
6338         return received;
6339 }
6340
6341 static void tg3_poll_link(struct tg3 *tp)
6342 {
6343         /* handle link change and other phy events */
6344         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6345                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6346
6347                 if (sblk->status & SD_STATUS_LINK_CHG) {
6348                         sblk->status = SD_STATUS_UPDATED |
6349                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6350                         spin_lock(&tp->lock);
6351                         if (tg3_flag(tp, USE_PHYLIB)) {
6352                                 tw32_f(MAC_STATUS,
6353                                      (MAC_STATUS_SYNC_CHANGED |
6354                                       MAC_STATUS_CFG_CHANGED |
6355                                       MAC_STATUS_MI_COMPLETION |
6356                                       MAC_STATUS_LNKSTATE_CHANGED));
6357                                 udelay(40);
6358                         } else
6359                                 tg3_setup_phy(tp, 0);
6360                         spin_unlock(&tp->lock);
6361                 }
6362         }
6363 }
6364
6365 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6366                                 struct tg3_rx_prodring_set *dpr,
6367                                 struct tg3_rx_prodring_set *spr)
6368 {
6369         u32 si, di, cpycnt, src_prod_idx;
6370         int i, err = 0;
6371
6372         while (1) {
6373                 src_prod_idx = spr->rx_std_prod_idx;
6374
6375                 /* Make sure updates to the rx_std_buffers[] entries and the
6376                  * standard producer index are seen in the correct order.
6377                  */
6378                 smp_rmb();
6379
6380                 if (spr->rx_std_cons_idx == src_prod_idx)
6381                         break;
6382
6383                 if (spr->rx_std_cons_idx < src_prod_idx)
6384                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6385                 else
6386                         cpycnt = tp->rx_std_ring_mask + 1 -
6387                                  spr->rx_std_cons_idx;
6388
6389                 cpycnt = min(cpycnt,
6390                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6391
6392                 si = spr->rx_std_cons_idx;
6393                 di = dpr->rx_std_prod_idx;
6394
6395                 for (i = di; i < di + cpycnt; i++) {
6396                         if (dpr->rx_std_buffers[i].data) {
6397                                 cpycnt = i - di;
6398                                 err = -ENOSPC;
6399                                 break;
6400                         }
6401                 }
6402
6403                 if (!cpycnt)
6404                         break;
6405
6406                 /* Ensure that updates to the rx_std_buffers ring and the
6407                  * shadowed hardware producer ring from tg3_recycle_skb() are
6408                  * ordered correctly WRT the skb check above.
6409                  */
6410                 smp_rmb();
6411
6412                 memcpy(&dpr->rx_std_buffers[di],
6413                        &spr->rx_std_buffers[si],
6414                        cpycnt * sizeof(struct ring_info));
6415
6416                 for (i = 0; i < cpycnt; i++, di++, si++) {
6417                         struct tg3_rx_buffer_desc *sbd, *dbd;
6418                         sbd = &spr->rx_std[si];
6419                         dbd = &dpr->rx_std[di];
6420                         dbd->addr_hi = sbd->addr_hi;
6421                         dbd->addr_lo = sbd->addr_lo;
6422                 }
6423
6424                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6425                                        tp->rx_std_ring_mask;
6426                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6427                                        tp->rx_std_ring_mask;
6428         }
6429
6430         while (1) {
6431                 src_prod_idx = spr->rx_jmb_prod_idx;
6432
6433                 /* Make sure updates to the rx_jmb_buffers[] entries and
6434                  * the jumbo producer index are seen in the correct order.
6435                  */
6436                 smp_rmb();
6437
6438                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6439                         break;
6440
6441                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6442                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6443                 else
6444                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6445                                  spr->rx_jmb_cons_idx;
6446
6447                 cpycnt = min(cpycnt,
6448                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6449
6450                 si = spr->rx_jmb_cons_idx;
6451                 di = dpr->rx_jmb_prod_idx;
6452
6453                 for (i = di; i < di + cpycnt; i++) {
6454                         if (dpr->rx_jmb_buffers[i].data) {
6455                                 cpycnt = i - di;
6456                                 err = -ENOSPC;
6457                                 break;
6458                         }
6459                 }
6460
6461                 if (!cpycnt)
6462                         break;
6463
6464                 /* Ensure that updates to the rx_jmb_buffers ring and the
6465                  * shadowed hardware producer ring from tg3_recycle_skb() are
6466                  * ordered correctly WRT the skb check above.
6467                  */
6468                 smp_rmb();
6469
6470                 memcpy(&dpr->rx_jmb_buffers[di],
6471                        &spr->rx_jmb_buffers[si],
6472                        cpycnt * sizeof(struct ring_info));
6473
6474                 for (i = 0; i < cpycnt; i++, di++, si++) {
6475                         struct tg3_rx_buffer_desc *sbd, *dbd;
6476                         sbd = &spr->rx_jmb[si].std;
6477                         dbd = &dpr->rx_jmb[di].std;
6478                         dbd->addr_hi = sbd->addr_hi;
6479                         dbd->addr_lo = sbd->addr_lo;
6480                 }
6481
6482                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6483                                        tp->rx_jmb_ring_mask;
6484                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6485                                        tp->rx_jmb_ring_mask;
6486         }
6487
6488         return err;
6489 }
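
     /* Worked pass through the index arithmetic above (illustrative),
      * assuming a 512-entry standard ring (rx_std_ring_mask == 511): with
      * spr->rx_std_cons_idx = 500 and spr->rx_std_prod_idx = 10, the first
      * iteration copies 512 - 500 = 12 entries (500..511) and wraps the
      * consumer index to 0; the next iteration copies the remaining 10
      * entries (0..9).  Both passes are also clamped by the free space
      * available in the destination ring.
      */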
6490
6491 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6492 {
6493         struct tg3 *tp = tnapi->tp;
6494
6495         /* run TX completion thread */
6496         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6497                 tg3_tx(tnapi);
6498                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6499                         return work_done;
6500         }
6501
6502         if (!tnapi->rx_rcb_prod_idx)
6503                 return work_done;
6504
6505         /* run RX thread, within the bounds set by NAPI.
6506          * All RX "locking" is done by ensuring outside
6507          * code synchronizes with tg3->napi.poll()
6508          */
6509         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6510                 work_done += tg3_rx(tnapi, budget - work_done);
6511
6512         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6513                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6514                 int i, err = 0;
6515                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6516                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6517
6518                 tp->rx_refill = false;
6519                 for (i = 1; i <= tp->rxq_cnt; i++)
6520                         err |= tg3_rx_prodring_xfer(tp, dpr,
6521                                                     &tp->napi[i].prodring);
6522
6523                 wmb();
6524
6525                 if (std_prod_idx != dpr->rx_std_prod_idx)
6526                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6527                                      dpr->rx_std_prod_idx);
6528
6529                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6530                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6531                                      dpr->rx_jmb_prod_idx);
6532
6533                 mmiowb();
6534
6535                 if (err)
6536                         tw32_f(HOSTCC_MODE, tp->coal_now);
6537         }
6538
6539         return work_done;
6540 }
6541
6542 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6543 {
6544         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6545                 schedule_work(&tp->reset_task);
6546 }
6547
6548 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6549 {
6550         cancel_work_sync(&tp->reset_task);
6551         tg3_flag_clear(tp, RESET_TASK_PENDING);
6552         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6553 }
6554
6555 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6556 {
6557         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6558         struct tg3 *tp = tnapi->tp;
6559         int work_done = 0;
6560         struct tg3_hw_status *sblk = tnapi->hw_status;
6561
6562         while (1) {
6563                 work_done = tg3_poll_work(tnapi, work_done, budget);
6564
6565                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6566                         goto tx_recovery;
6567
6568                 if (unlikely(work_done >= budget))
6569                         break;
6570
6571                 /* tp->last_tag is used in tg3_int_reenable() below
6572                  * to tell the hw how much work has been processed,
6573                  * so we must read it before checking for more work.
6574                  */
6575                 tnapi->last_tag = sblk->status_tag;
6576                 tnapi->last_irq_tag = tnapi->last_tag;
6577                 rmb();
6578
6579                 /* check for RX/TX work to do */
6580                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6581                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6582
6583                         /* This test is not race free, but looping
6584                          * again here reduces the number of interrupts.
6585                          */
6586                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6587                                 continue;
6588
6589                         napi_complete(napi);
6590                         /* Reenable interrupts. */
6591                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6592
6593                         /* This test here is synchronized by napi_schedule()
6594                          * and napi_complete() to close the race condition.
6595                          */
6596                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6597                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6598                                                   HOSTCC_MODE_ENABLE |
6599                                                   tnapi->coal_now);
6600                         }
6601                         mmiowb();
6602                         break;
6603                 }
6604         }
6605
6606         return work_done;
6607
6608 tx_recovery:
6609         /* work_done is guaranteed to be less than budget. */
6610         napi_complete(napi);
6611         tg3_reset_task_schedule(tp);
6612         return work_done;
6613 }
6614
6615 static void tg3_process_error(struct tg3 *tp)
6616 {
6617         u32 val;
6618         bool real_error = false;
6619
6620         if (tg3_flag(tp, ERROR_PROCESSED))
6621                 return;
6622
6623         /* Check Flow Attention register */
6624         val = tr32(HOSTCC_FLOW_ATTN);
6625         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6626                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6627                 real_error = true;
6628         }
6629
6630         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6631                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6632                 real_error = true;
6633         }
6634
6635         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6636                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6637                 real_error = true;
6638         }
6639
6640         if (!real_error)
6641                 return;
6642
6643         tg3_dump_state(tp);
6644
6645         tg3_flag_set(tp, ERROR_PROCESSED);
6646         tg3_reset_task_schedule(tp);
6647 }
6648
6649 static int tg3_poll(struct napi_struct *napi, int budget)
6650 {
6651         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6652         struct tg3 *tp = tnapi->tp;
6653         int work_done = 0;
6654         struct tg3_hw_status *sblk = tnapi->hw_status;
6655
6656         while (1) {
6657                 if (sblk->status & SD_STATUS_ERROR)
6658                         tg3_process_error(tp);
6659
6660                 tg3_poll_link(tp);
6661
6662                 work_done = tg3_poll_work(tnapi, work_done, budget);
6663
6664                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6665                         goto tx_recovery;
6666
6667                 if (unlikely(work_done >= budget))
6668                         break;
6669
6670                 if (tg3_flag(tp, TAGGED_STATUS)) {
6671                         /* tp->last_tag is used in tg3_int_reenable() below
6672                          * to tell the hw how much work has been processed,
6673                          * so we must read it before checking for more work.
6674                          */
6675                         tnapi->last_tag = sblk->status_tag;
6676                         tnapi->last_irq_tag = tnapi->last_tag;
6677                         rmb();
6678                 } else
6679                         sblk->status &= ~SD_STATUS_UPDATED;
6680
6681                 if (likely(!tg3_has_work(tnapi))) {
6682                         napi_complete(napi);
6683                         tg3_int_reenable(tnapi);
6684                         break;
6685                 }
6686         }
6687
6688         return work_done;
6689
6690 tx_recovery:
6691         /* work_done is guaranteed to be less than budget. */
6692         napi_complete(napi);
6693         tg3_reset_task_schedule(tp);
6694         return work_done;
6695 }
6696
6697 static void tg3_napi_disable(struct tg3 *tp)
6698 {
6699         int i;
6700
6701         for (i = tp->irq_cnt - 1; i >= 0; i--)
6702                 napi_disable(&tp->napi[i].napi);
6703 }
6704
6705 static void tg3_napi_enable(struct tg3 *tp)
6706 {
6707         int i;
6708
6709         for (i = 0; i < tp->irq_cnt; i++)
6710                 napi_enable(&tp->napi[i].napi);
6711 }
6712
6713 static void tg3_napi_init(struct tg3 *tp)
6714 {
6715         int i;
6716
6717         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6718         for (i = 1; i < tp->irq_cnt; i++)
6719                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6720 }
6721
6722 static void tg3_napi_fini(struct tg3 *tp)
6723 {
6724         int i;
6725
6726         for (i = 0; i < tp->irq_cnt; i++)
6727                 netif_napi_del(&tp->napi[i].napi);
6728 }
6729
6730 static inline void tg3_netif_stop(struct tg3 *tp)
6731 {
6732         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6733         tg3_napi_disable(tp);
6734         netif_carrier_off(tp->dev);
6735         netif_tx_disable(tp->dev);
6736 }
6737
6738 /* tp->lock must be held */
6739 static inline void tg3_netif_start(struct tg3 *tp)
6740 {
6741         tg3_ptp_resume(tp);
6742
6743         /* NOTE: unconditional netif_tx_wake_all_queues is only
6744          * appropriate so long as all callers are assured to
6745          * have free tx slots (such as after tg3_init_hw)
6746          */
6747         netif_tx_wake_all_queues(tp->dev);
6748
6749         if (tp->link_up)
6750                 netif_carrier_on(tp->dev);
6751
6752         tg3_napi_enable(tp);
6753         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6754         tg3_enable_ints(tp);
6755 }
6756
6757 static void tg3_irq_quiesce(struct tg3 *tp)
6758 {
6759         int i;
6760
6761         BUG_ON(tp->irq_sync);
6762
6763         tp->irq_sync = 1;
6764         smp_mb();
6765
6766         for (i = 0; i < tp->irq_cnt; i++)
6767                 synchronize_irq(tp->napi[i].irq_vec);
6768 }
6769
6770 /* Fully shut down all tg3 driver activity elsewhere in the system.
6771  * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
6772  * Most of the time this is not necessary, except when shutting down
6773  * the device.
6774  */
6775 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6776 {
6777         spin_lock_bh(&tp->lock);
6778         if (irq_sync)
6779                 tg3_irq_quiesce(tp);
6780 }
6781
6782 static inline void tg3_full_unlock(struct tg3 *tp)
6783 {
6784         spin_unlock_bh(&tp->lock);
6785 }
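
     /* Typical usage (an illustrative sketch, mirroring callers elsewhere
      * in this file):
      *
      *     tg3_full_lock(tp, 1);    <- also waits for in-flight IRQ handlers
      *     ... reconfigure or halt the hardware ...
      *     tg3_full_unlock(tp);
      *
      * Passing irq_sync == 0 takes only the spinlock, which suffices for
      * short register updates such as the PTP callbacks above.
      */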
6786
6787 /* One-shot MSI handler - the chip automatically disables the
6788  * interrupt after sending the MSI, so the driver doesn't have to.
6789  */
6790 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6791 {
6792         struct tg3_napi *tnapi = dev_id;
6793         struct tg3 *tp = tnapi->tp;
6794
6795         prefetch(tnapi->hw_status);
6796         if (tnapi->rx_rcb)
6797                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6798
6799         if (likely(!tg3_irq_sync(tp)))
6800                 napi_schedule(&tnapi->napi);
6801
6802         return IRQ_HANDLED;
6803 }
6804
6805 /* MSI ISR - No need to check for interrupt sharing and no need to
6806  * flush status block and interrupt mailbox. PCI ordering rules
6807  * guarantee that MSI will arrive after the status block.
6808  */
6809 static irqreturn_t tg3_msi(int irq, void *dev_id)
6810 {
6811         struct tg3_napi *tnapi = dev_id;
6812         struct tg3 *tp = tnapi->tp;
6813
6814         prefetch(tnapi->hw_status);
6815         if (tnapi->rx_rcb)
6816                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6817         /*
6818          * Writing any value to intr-mbox-0 clears PCI INTA# and
6819          * chip-internal interrupt pending events.
6820          * Writing non-zero to intr-mbox-0 additional tells the
6821          * NIC to stop sending us irqs, engaging "in-intr-handler"
6822          * event coalescing.
6823          */
6824         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6825         if (likely(!tg3_irq_sync(tp)))
6826                 napi_schedule(&tnapi->napi);
6827
6828         return IRQ_RETVAL(1);
6829 }
6830
6831 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6832 {
6833         struct tg3_napi *tnapi = dev_id;
6834         struct tg3 *tp = tnapi->tp;
6835         struct tg3_hw_status *sblk = tnapi->hw_status;
6836         unsigned int handled = 1;
6837
6838         /* In INTx mode, it is possible for the interrupt to arrive at
6839          * the CPU before the status block posted just prior to it is
6840          * visible.  Reading the PCI State register will confirm whether
6841          * the interrupt is ours and will flush the status block.
6842          */
6843         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6844                 if (tg3_flag(tp, CHIP_RESETTING) ||
6845                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6846                         handled = 0;
6847                         goto out;
6848                 }
6849         }
6850
6851         /*
6852          * Writing any value to intr-mbox-0 clears PCI INTA# and
6853          * chip-internal interrupt pending events.
6854          * Writing non-zero to intr-mbox-0 additional tells the
6855          * NIC to stop sending us irqs, engaging "in-intr-handler"
6856          * event coalescing.
6857          *
6858          * Flush the mailbox to de-assert the IRQ immediately to prevent
6859          * spurious interrupts.  The flush impacts performance but
6860          * excessive spurious interrupts can be worse in some cases.
6861          */
6862         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6863         if (tg3_irq_sync(tp))
6864                 goto out;
6865         sblk->status &= ~SD_STATUS_UPDATED;
6866         if (likely(tg3_has_work(tnapi))) {
6867                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6868                 napi_schedule(&tnapi->napi);
6869         } else {
6870                 /* No work, shared interrupt perhaps?  Re-enable
6871                  * interrupts, and flush that PCI write
6872                  */
6873                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6874                                0x00000000);
6875         }
6876 out:
6877         return IRQ_RETVAL(handled);
6878 }
6879
6880 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6881 {
6882         struct tg3_napi *tnapi = dev_id;
6883         struct tg3 *tp = tnapi->tp;
6884         struct tg3_hw_status *sblk = tnapi->hw_status;
6885         unsigned int handled = 1;
6886
6887         /* In INTx mode, it is possible for the interrupt to arrive at
6888          * the CPU before the status block posted just prior to it is
6889          * visible.  Reading the PCI State register will confirm whether
6890          * the interrupt is ours and will flush the status block.
6891          */
6892         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6893                 if (tg3_flag(tp, CHIP_RESETTING) ||
6894                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6895                         handled = 0;
6896                         goto out;
6897                 }
6898         }
6899
6900         /*
6901          * Writing any value to intr-mbox-0 clears PCI INTA# and
6902          * chip-internal interrupt pending events.
6903          * Writing non-zero to intr-mbox-0 additionally tells the
6904          * NIC to stop sending us irqs, engaging "in-intr-handler"
6905          * event coalescing.
6906          *
6907          * Flush the mailbox to de-assert the IRQ immediately to prevent
6908          * spurious interrupts.  The flush impacts performance but
6909          * excessive spurious interrupts can be worse in some cases.
6910          */
6911         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6912
6913         /*
6914          * In a shared interrupt configuration, sometimes other devices'
6915          * interrupts will scream.  We record the current status tag here
6916          * so that the above check can report that the screaming interrupts
6917          * are unhandled.  Eventually they will be silenced.
6918          */
6919         tnapi->last_irq_tag = sblk->status_tag;
6920
6921         if (tg3_irq_sync(tp))
6922                 goto out;
6923
6924         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6925
6926         napi_schedule(&tnapi->napi);
6927
6928 out:
6929         return IRQ_RETVAL(handled);
6930 }
6931
6932 /* ISR for interrupt test */
6933 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6934 {
6935         struct tg3_napi *tnapi = dev_id;
6936         struct tg3 *tp = tnapi->tp;
6937         struct tg3_hw_status *sblk = tnapi->hw_status;
6938
6939         if ((sblk->status & SD_STATUS_UPDATED) ||
6940             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6941                 tg3_disable_ints(tp);
6942                 return IRQ_RETVAL(1);
6943         }
6944         return IRQ_RETVAL(0);
6945 }
6946
6947 #ifdef CONFIG_NET_POLL_CONTROLLER
6948 static void tg3_poll_controller(struct net_device *dev)
6949 {
6950         int i;
6951         struct tg3 *tp = netdev_priv(dev);
6952
6953         for (i = 0; i < tp->irq_cnt; i++)
6954                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6955 }
6956 #endif
6957
6958 static void tg3_tx_timeout(struct net_device *dev)
6959 {
6960         struct tg3 *tp = netdev_priv(dev);
6961
6962         if (netif_msg_tx_err(tp)) {
6963                 netdev_err(dev, "transmit timed out, resetting\n");
6964                 tg3_dump_state(tp);
6965         }
6966
6967         tg3_reset_task_schedule(tp);
6968 }
6969
6970 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
6971 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6972 {
6973         u32 base = (u32) mapping & 0xffffffff;
6974
6975         return (base > 0xffffdcc0) && (base + len + 8 < base);
6976 }
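
     /* Worked example (illustrative): base = 0xffffff00 with len = 0x200
      * gives base + len + 8 = 0x100000108, which truncates to 0x108 in
      * 32-bit arithmetic; 0x108 < base, so the buffer crosses a 4GB
      * boundary and the test returns true.  A buffer at base = 0x10000000
      * fails the first comparison and returns false.
      */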
6977
6978 /* Test for DMA addresses > 40-bit */
6979 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6980                                           int len)
6981 {
6982 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6983         if (tg3_flag(tp, 40BIT_DMA_BUG))
6984                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6985         return 0;
6986 #else
6987         return 0;
6988 #endif
6989 }
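
     /* Worked example (illustrative, for configs where the test is
      * compiled in): mapping = 0xfffffff000 with len = 0x2000 gives
      * mapping + len = 0x10000001000, which exceeds DMA_BIT_MASK(40) =
      * 0xffffffffff, so the test reports an overflow.
      */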
6990
6991 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6992                                  dma_addr_t mapping, u32 len, u32 flags,
6993                                  u32 mss, u32 vlan)
6994 {
6995         txbd->addr_hi = ((u64) mapping >> 32);
6996         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6997         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6998         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6999 }
7000
7001 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7002                             dma_addr_t map, u32 len, u32 flags,
7003                             u32 mss, u32 vlan)
7004 {
7005         struct tg3 *tp = tnapi->tp;
7006         bool hwbug = false;
7007
7008         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7009                 hwbug = true;
7010
7011         if (tg3_4g_overflow_test(map, len))
7012                 hwbug = true;
7013
7014         if (tg3_40bit_overflow_test(tp, map, len))
7015                 hwbug = true;
7016
7017         if (tp->dma_limit) {
7018                 u32 prvidx = *entry;
7019                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7020                 while (len > tp->dma_limit && *budget) {
7021                         u32 frag_len = tp->dma_limit;
7022                         len -= tp->dma_limit;
7023
7024                         /* Avoid the 8-byte DMA problem */
7025                         if (len <= 8) {
7026                                 len += tp->dma_limit / 2;
7027                                 frag_len = tp->dma_limit / 2;
7028                         }
7029
7030                         tnapi->tx_buffers[*entry].fragmented = true;
7031
7032                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7033                                       frag_len, tmp_flag, mss, vlan);
7034                         *budget -= 1;
7035                         prvidx = *entry;
7036                         *entry = NEXT_TX(*entry);
7037
7038                         map += frag_len;
7039                 }
7040
7041                 if (len) {
7042                         if (*budget) {
7043                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7044                                               len, flags, mss, vlan);
7045                                 *budget -= 1;
7046                                 *entry = NEXT_TX(*entry);
7047                         } else {
7048                                 hwbug = true;
7049                                 tnapi->tx_buffers[prvidx].fragmented = false;
7050                         }
7051                 }
7052         } else {
7053                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7054                               len, flags, mss, vlan);
7055                 *entry = NEXT_TX(*entry);
7056         }
7057
7058         return hwbug;
7059 }
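
     /* Worked example of the splitting above (an illustrative sketch,
      * assuming tp->dma_limit == 4096): an 8200-byte fragment is posted as
      * 4096 + 2048 + 2056 bytes.  The second chunk is halved because two
      * full 4096-byte chunks would leave only 8 bytes, tripping the
      * short-DMA bug the loop avoids; halving instead leaves a final
      * descriptor of 2056 bytes.
      */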
7060
7061 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7062 {
7063         int i;
7064         struct sk_buff *skb;
7065         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7066
7067         skb = txb->skb;
7068         txb->skb = NULL;
7069
7070         pci_unmap_single(tnapi->tp->pdev,
7071                          dma_unmap_addr(txb, mapping),
7072                          skb_headlen(skb),
7073                          PCI_DMA_TODEVICE);
7074
7075         while (txb->fragmented) {
7076                 txb->fragmented = false;
7077                 entry = NEXT_TX(entry);
7078                 txb = &tnapi->tx_buffers[entry];
7079         }
7080
7081         for (i = 0; i <= last; i++) {
7082                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7083
7084                 entry = NEXT_TX(entry);
7085                 txb = &tnapi->tx_buffers[entry];
7086
7087                 pci_unmap_page(tnapi->tp->pdev,
7088                                dma_unmap_addr(txb, mapping),
7089                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7090
7091                 while (txb->fragmented) {
7092                         txb->fragmented = false;
7093                         entry = NEXT_TX(entry);
7094                         txb = &tnapi->tx_buffers[entry];
7095                 }
7096         }
7097 }
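
/*
 * Note (descriptive): a single DMA mapping may span several BDs when
 * tg3_tx_frag_set() splits it against tp->dma_limit.  Only the first
 * entry of such a group carries the skb pointer and unmap address, so
 * the 'fragmented' walks above skip the continuation entries instead
 * of unmapping them a second time.
 */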
7098
7099 /* Work around the 4GB and 40-bit hardware DMA bugs. */
7100 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7101                                        struct sk_buff **pskb,
7102                                        u32 *entry, u32 *budget,
7103                                        u32 base_flags, u32 mss, u32 vlan)
7104 {
7105         struct tg3 *tp = tnapi->tp;
7106         struct sk_buff *new_skb, *skb = *pskb;
7107         dma_addr_t new_addr = 0;
7108         int ret = 0;
7109
7110         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7111                 new_skb = skb_copy(skb, GFP_ATOMIC);
7112         else {
7113                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7114
7115                 new_skb = skb_copy_expand(skb,
7116                                           skb_headroom(skb) + more_headroom,
7117                                           skb_tailroom(skb), GFP_ATOMIC);
7118         }
7119
7120         if (!new_skb) {
7121                 ret = -1;
7122         } else {
7123                 /* New SKB is guaranteed to be linear. */
7124                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7125                                           PCI_DMA_TODEVICE);
7126                 /* Make sure the mapping succeeded */
7127                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7128                         dev_kfree_skb(new_skb);
7129                         ret = -1;
7130                 } else {
7131                         u32 save_entry = *entry;
7132
7133                         base_flags |= TXD_FLAG_END;
7134
7135                         tnapi->tx_buffers[*entry].skb = new_skb;
7136                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7137                                            mapping, new_addr);
7138
7139                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7140                                             new_skb->len, base_flags,
7141                                             mss, vlan)) {
7142                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7143                                 dev_kfree_skb(new_skb);
7144                                 ret = -1;
7145                         }
7146                 }
7147         }
7148
7149         dev_kfree_skb(skb);
7150         *pskb = new_skb;
7151         return ret;
7152 }
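
/*
 * Example (illustrative): on a 5701, if skb->data ends in ...0x2 then
 * more_headroom above is 4 - 2 = 2, presumably so skb_copy_expand()
 * has room to place the copied data on a 4-byte boundary before the
 * buffer is re-run through tg3_tx_frag_set().
 */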
7153
7154 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7155
7156 /* Use GSO to work around a rare TSO bug that may be triggered when the
7157  * TSO header is greater than 80 bytes.
7158  */
7159 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7160 {
7161         struct sk_buff *segs, *nskb;
7162         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7163
7164         /* Estimate the number of fragments in the worst case */
7165         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7166                 netif_stop_queue(tp->dev);
7167
7168                 /* netif_tx_stop_queue() must be done before checking
7169                  * tx index in tg3_tx_avail() below, because in
7170                  * tg3_tx(), we update tx index before checking for
7171                  * netif_tx_queue_stopped().
7172                  */
7173                 smp_mb();
7174                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7175                         return NETDEV_TX_BUSY;
7176
7177                 netif_wake_queue(tp->dev);
7178         }
7179
7180         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7181         if (IS_ERR(segs))
7182                 goto tg3_tso_bug_end;
7183
7184         do {
7185                 nskb = segs;
7186                 segs = segs->next;
7187                 nskb->next = NULL;
7188                 tg3_start_xmit(nskb, tp->dev);
7189         } while (segs);
7190
7191 tg3_tso_bug_end:
7192         dev_kfree_skb(skb);
7193
7194         return NETDEV_TX_OK;
7195 }
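
/*
 * The frag_cnt_est above (gso_segs * 3) presumably budgets up to three
 * BDs per resulting segment (header plus a couple of data fragments).
 * It is deliberately pessimistic: stopping the queue early is cheaper
 * than running out of descriptors halfway through the segment list.
 */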
7196
7197 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7198  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7199  */
7200 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7201 {
7202         struct tg3 *tp = netdev_priv(dev);
7203         u32 len, entry, base_flags, mss, vlan = 0;
7204         u32 budget;
7205         int i = -1, would_hit_hwbug;
7206         dma_addr_t mapping;
7207         struct tg3_napi *tnapi;
7208         struct netdev_queue *txq;
7209         unsigned int last;
7210
7211         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7212         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7213         if (tg3_flag(tp, ENABLE_TSS))
7214                 tnapi++;
7215
7216         budget = tg3_tx_avail(tnapi);
7217
7218         /* We are running in a BH-disabled context with netif_tx_lock held,
7219          * and TX reclaim runs via tp->napi.poll inside a software
7220          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7221          * no IRQ context deadlocks to worry about either.  Rejoice!
7222          */
7223         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7224                 if (!netif_tx_queue_stopped(txq)) {
7225                         netif_tx_stop_queue(txq);
7226
7227                         /* This is a hard error, log it. */
7228                         netdev_err(dev,
7229                                    "BUG! Tx Ring full when queue awake!\n");
7230                 }
7231                 return NETDEV_TX_BUSY;
7232         }
7233
7234         entry = tnapi->tx_prod;
7235         base_flags = 0;
7236         if (skb->ip_summed == CHECKSUM_PARTIAL)
7237                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7238
7239         mss = skb_shinfo(skb)->gso_size;
7240         if (mss) {
7241                 struct iphdr *iph;
7242                 u32 tcp_opt_len, hdr_len;
7243
7244                 if (skb_header_cloned(skb) &&
7245                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7246                         goto drop;
7247
7248                 iph = ip_hdr(skb);
7249                 tcp_opt_len = tcp_optlen(skb);
7250
7251                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7252
7253                 if (!skb_is_gso_v6(skb)) {
7254                         iph->check = 0;
7255                         iph->tot_len = htons(mss + hdr_len);
7256                 }
7257
7258                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7259                     tg3_flag(tp, TSO_BUG))
7260                         return tg3_tso_bug(tp, skb);
7261
7262                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7263                                TXD_FLAG_CPU_POST_DMA);
7264
7265                 if (tg3_flag(tp, HW_TSO_1) ||
7266                     tg3_flag(tp, HW_TSO_2) ||
7267                     tg3_flag(tp, HW_TSO_3)) {
7268                         tcp_hdr(skb)->check = 0;
7269                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7270                 } else
7271                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7272                                                                  iph->daddr, 0,
7273                                                                  IPPROTO_TCP,
7274                                                                  0);
7275
7276                 if (tg3_flag(tp, HW_TSO_3)) {
7277                         mss |= (hdr_len & 0xc) << 12;
7278                         if (hdr_len & 0x10)
7279                                 base_flags |= 0x00000010;
7280                         base_flags |= (hdr_len & 0x3e0) << 5;
7281                 } else if (tg3_flag(tp, HW_TSO_2))
7282                         mss |= hdr_len << 9;
7283                 else if (tg3_flag(tp, HW_TSO_1) ||
7284                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7285                         if (tcp_opt_len || iph->ihl > 5) {
7286                                 int tsflags;
7287
7288                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7289                                 mss |= (tsflags << 11);
7290                         }
7291                 } else {
7292                         if (tcp_opt_len || iph->ihl > 5) {
7293                                 int tsflags;
7294
7295                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7296                                 base_flags |= tsflags << 12;
7297                         }
7298                 }
7299         }
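
        /*
         * Worked example (HW_TSO_3, header sizes assumed): a plain
         * IPv4/TCP header with no options gives hdr_len = 20 + 20 =
         * 40 = 0x28, so the packing above sets mss |= 0x8000 (hdr_len
         * bits 3:2 land in mss bits 15:14) and base_flags |= 0x400
         * (hdr_len bits 9:5 land in base_flags bits 14:10), while
         * hdr_len bit 4 is clear so bit 4 of base_flags stays zero.
         */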
7300
7301         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7302             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7303                 base_flags |= TXD_FLAG_JMB_PKT;
7304
7305         if (vlan_tx_tag_present(skb)) {
7306                 base_flags |= TXD_FLAG_VLAN;
7307                 vlan = vlan_tx_tag_get(skb);
7308         }
7309
7310         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7311             tg3_flag(tp, TX_TSTAMP_EN)) {
7312                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7313                 base_flags |= TXD_FLAG_HWTSTAMP;
7314         }
7315
7316         len = skb_headlen(skb);
7317
7318         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7319         if (pci_dma_mapping_error(tp->pdev, mapping))
7320                 goto drop;
7321
7322
7323         tnapi->tx_buffers[entry].skb = skb;
7324         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7325
7326         would_hit_hwbug = 0;
7327
7328         if (tg3_flag(tp, 5701_DMA_BUG))
7329                 would_hit_hwbug = 1;
7330
7331         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7332                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7333                             mss, vlan)) {
7334                 would_hit_hwbug = 1;
7335         } else if (skb_shinfo(skb)->nr_frags > 0) {
7336                 u32 tmp_mss = mss;
7337
7338                 if (!tg3_flag(tp, HW_TSO_1) &&
7339                     !tg3_flag(tp, HW_TSO_2) &&
7340                     !tg3_flag(tp, HW_TSO_3))
7341                         tmp_mss = 0;
7342
7343                 /* Now loop through additional data
7344                  * fragments and queue them.
7345                  */
7346                 last = skb_shinfo(skb)->nr_frags - 1;
7347                 for (i = 0; i <= last; i++) {
7348                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7349
7350                         len = skb_frag_size(frag);
7351                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7352                                                    len, DMA_TO_DEVICE);
7353
7354                         tnapi->tx_buffers[entry].skb = NULL;
7355                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7356                                            mapping);
7357                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7358                                 goto dma_error;
7359
7360                         if (!budget ||
7361                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7362                                             len, base_flags |
7363                                             ((i == last) ? TXD_FLAG_END : 0),
7364                                             tmp_mss, vlan)) {
7365                                 would_hit_hwbug = 1;
7366                                 break;
7367                         }
7368                 }
7369         }
7370
7371         if (would_hit_hwbug) {
7372                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7373
7374                 /* If the workaround fails due to memory/mapping
7375                  * failure, silently drop this packet.
7376                  */
7377                 entry = tnapi->tx_prod;
7378                 budget = tg3_tx_avail(tnapi);
7379                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7380                                                 base_flags, mss, vlan))
7381                         goto drop_nofree;
7382         }
7383
7384         skb_tx_timestamp(skb);
7385         netdev_tx_sent_queue(txq, skb->len);
7386
7387         /* Sync BD data before updating mailbox */
7388         wmb();
7389
7390         /* Packets are ready; update the Tx producer idx locally and on the card. */
7391         tw32_tx_mbox(tnapi->prodmbox, entry);
7392
7393         tnapi->tx_prod = entry;
7394         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7395                 netif_tx_stop_queue(txq);
7396
7397                 /* netif_tx_stop_queue() must be done before checking
7398                  * tx index in tg3_tx_avail() below, because in
7399                  * tg3_tx(), we update tx index before checking for
7400                  * netif_tx_queue_stopped().
7401                  */
7402                 smp_mb();
7403                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7404                         netif_tx_wake_queue(txq);
7405         }
7406
7407         mmiowb();
7408         return NETDEV_TX_OK;
7409
7410 dma_error:
7411         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7412         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7413 drop:
7414         dev_kfree_skb(skb);
7415 drop_nofree:
7416         tp->tx_dropped++;
7417         return NETDEV_TX_OK;
7418 }
7419
7420 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7421 {
7422         if (enable) {
7423                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7424                                   MAC_MODE_PORT_MODE_MASK);
7425
7426                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7427
7428                 if (!tg3_flag(tp, 5705_PLUS))
7429                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7430
7431                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7432                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7433                 else
7434                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7435         } else {
7436                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7437
7438                 if (tg3_flag(tp, 5705_PLUS) ||
7439                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7440                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7441                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7442         }
7443
7444         tw32(MAC_MODE, tp->mac_mode);
7445         udelay(40);
7446 }
7447
7448 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7449 {
7450         u32 val, bmcr, mac_mode, ptest = 0;
7451
7452         tg3_phy_toggle_apd(tp, false);
7453         tg3_phy_toggle_automdix(tp, 0);
7454
7455         if (extlpbk && tg3_phy_set_extloopbk(tp))
7456                 return -EIO;
7457
7458         bmcr = BMCR_FULLDPLX;
7459         switch (speed) {
7460         case SPEED_10:
7461                 break;
7462         case SPEED_100:
7463                 bmcr |= BMCR_SPEED100;
7464                 break;
7465         case SPEED_1000:
7466         default:
7467                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7468                         speed = SPEED_100;
7469                         bmcr |= BMCR_SPEED100;
7470                 } else {
7471                         speed = SPEED_1000;
7472                         bmcr |= BMCR_SPEED1000;
7473                 }
7474         }
7475
7476         if (extlpbk) {
7477                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7478                         tg3_readphy(tp, MII_CTRL1000, &val);
7479                         val |= CTL1000_AS_MASTER |
7480                                CTL1000_ENABLE_MASTER;
7481                         tg3_writephy(tp, MII_CTRL1000, val);
7482                 } else {
7483                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7484                                 MII_TG3_FET_PTEST_TRIM_2;
7485                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7486                 }
7487         } else
7488                 bmcr |= BMCR_LOOPBACK;
7489
7490         tg3_writephy(tp, MII_BMCR, bmcr);
7491
7492         /* The write needs to be flushed for the FETs */
7493         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7494                 tg3_readphy(tp, MII_BMCR, &bmcr);
7495
7496         udelay(40);
7497
7498         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7499             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7500                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7501                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7502                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7503
7504                 /* The write needs to be flushed for the AC131 */
7505                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7506         }
7507
7508         /* Reset to prevent losing the 1st rx packet intermittently */
7509         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7510             tg3_flag(tp, 5780_CLASS)) {
7511                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7512                 udelay(10);
7513                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7514         }
7515
7516         mac_mode = tp->mac_mode &
7517                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7518         if (speed == SPEED_1000)
7519                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7520         else
7521                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7522
7523         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7524                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7525
7526                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7527                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7528                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7529                         mac_mode |= MAC_MODE_LINK_POLARITY;
7530
7531                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7532                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7533         }
7534
7535         tw32(MAC_MODE, mac_mode);
7536         udelay(40);
7537
7538         return 0;
7539 }
7540
7541 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7542 {
7543         struct tg3 *tp = netdev_priv(dev);
7544
7545         if (features & NETIF_F_LOOPBACK) {
7546                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7547                         return;
7548
7549                 spin_lock_bh(&tp->lock);
7550                 tg3_mac_loopback(tp, true);
7551                 netif_carrier_on(tp->dev);
7552                 spin_unlock_bh(&tp->lock);
7553                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7554         } else {
7555                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7556                         return;
7557
7558                 spin_lock_bh(&tp->lock);
7559                 tg3_mac_loopback(tp, false);
7560                 /* Force link status check */
7561                 tg3_setup_phy(tp, 1);
7562                 spin_unlock_bh(&tp->lock);
7563                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7564         }
7565 }
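
/*
 * This path is normally exercised through the netdev feature flags;
 * assuming the standard ethtool name for NETIF_F_LOOPBACK, e.g.:
 *
 *	ethtool -K eth0 loopback on
 *
 * which lands here via tg3_set_features() below.
 */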
7566
7567 static netdev_features_t tg3_fix_features(struct net_device *dev,
7568         netdev_features_t features)
7569 {
7570         struct tg3 *tp = netdev_priv(dev);
7571
7572         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7573                 features &= ~NETIF_F_ALL_TSO;
7574
7575         return features;
7576 }
7577
7578 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7579 {
7580         netdev_features_t changed = dev->features ^ features;
7581
7582         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7583                 tg3_set_loopback(dev, features);
7584
7585         return 0;
7586 }
7587
7588 static void tg3_rx_prodring_free(struct tg3 *tp,
7589                                  struct tg3_rx_prodring_set *tpr)
7590 {
7591         int i;
7592
7593         if (tpr != &tp->napi[0].prodring) {
7594                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7595                      i = (i + 1) & tp->rx_std_ring_mask)
7596                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7597                                         tp->rx_pkt_map_sz);
7598
7599                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7600                         for (i = tpr->rx_jmb_cons_idx;
7601                              i != tpr->rx_jmb_prod_idx;
7602                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7603                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7604                                                 TG3_RX_JMB_MAP_SZ);
7605                         }
7606                 }
7607
7608                 return;
7609         }
7610
7611         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7612                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7613                                 tp->rx_pkt_map_sz);
7614
7615         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7616                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7617                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7618                                         TG3_RX_JMB_MAP_SZ);
7619         }
7620 }
7621
7622 /* Initialize rx rings for packet processing.
7623  *
7624  * The chip has been shut down and the driver detached from
7625  * the networking stack, so no interrupts or new tx packets will
7626  * end up in the driver.  tp->{tx,}lock are held and thus
7627  * we may not sleep.
7628  */
7629 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7630                                  struct tg3_rx_prodring_set *tpr)
7631 {
7632         u32 i, rx_pkt_dma_sz;
7633
7634         tpr->rx_std_cons_idx = 0;
7635         tpr->rx_std_prod_idx = 0;
7636         tpr->rx_jmb_cons_idx = 0;
7637         tpr->rx_jmb_prod_idx = 0;
7638
7639         if (tpr != &tp->napi[0].prodring) {
7640                 memset(&tpr->rx_std_buffers[0], 0,
7641                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7642                 if (tpr->rx_jmb_buffers)
7643                         memset(&tpr->rx_jmb_buffers[0], 0,
7644                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7645                 goto done;
7646         }
7647
7648         /* Zero out all descriptors. */
7649         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7650
7651         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7652         if (tg3_flag(tp, 5780_CLASS) &&
7653             tp->dev->mtu > ETH_DATA_LEN)
7654                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7655         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7656
7657         /* Initialize invariants of the rings; we only set this
7658          * stuff once.  This works because the card does not
7659          * write into the rx buffer posting rings.
7660          */
7661         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7662                 struct tg3_rx_buffer_desc *rxd;
7663
7664                 rxd = &tpr->rx_std[i];
7665                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7666                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7667                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7668                                (i << RXD_OPAQUE_INDEX_SHIFT));
7669         }
7670
7671         /* Now allocate fresh SKBs for each rx ring. */
7672         for (i = 0; i < tp->rx_pending; i++) {
7673                 unsigned int frag_size;
7674
7675                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7676                                       &frag_size) < 0) {
7677                         netdev_warn(tp->dev,
7678                                     "Using a smaller RX standard ring. Only "
7679                                     "%d out of %d buffers were allocated "
7680                                     "successfully\n", i, tp->rx_pending);
7681                         if (i == 0)
7682                                 goto initfail;
7683                         tp->rx_pending = i;
7684                         break;
7685                 }
7686         }
7687
7688         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7689                 goto done;
7690
7691         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7692
7693         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7694                 goto done;
7695
7696         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7697                 struct tg3_rx_buffer_desc *rxd;
7698
7699                 rxd = &tpr->rx_jmb[i].std;
7700                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7701                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7702                                   RXD_FLAG_JUMBO;
7703                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7704                        (i << RXD_OPAQUE_INDEX_SHIFT));
7705         }
7706
7707         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7708                 unsigned int frag_size;
7709
7710                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7711                                       &frag_size) < 0) {
7712                         netdev_warn(tp->dev,
7713                                     "Using a smaller RX jumbo ring. Only %d "
7714                                     "out of %d buffers were allocated "
7715                                     "successfully\n", i, tp->rx_jumbo_pending);
7716                         if (i == 0)
7717                                 goto initfail;
7718                         tp->rx_jumbo_pending = i;
7719                         break;
7720                 }
7721         }
7722
7723 done:
7724         return 0;
7725
7726 initfail:
7727         tg3_rx_prodring_free(tp, tpr);
7728         return -ENOMEM;
7729 }
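
/*
 * Note on the opaque fields initialized above: packing the ring id and
 * slot as RXD_OPAQUE_RING_STD | (i << RXD_OPAQUE_INDEX_SHIFT) lets the
 * rx completion path recover which producer ring and index a completed
 * descriptor belongs to without any extra lookup state.
 */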
7730
7731 static void tg3_rx_prodring_fini(struct tg3 *tp,
7732                                  struct tg3_rx_prodring_set *tpr)
7733 {
7734         kfree(tpr->rx_std_buffers);
7735         tpr->rx_std_buffers = NULL;
7736         kfree(tpr->rx_jmb_buffers);
7737         tpr->rx_jmb_buffers = NULL;
7738         if (tpr->rx_std) {
7739                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7740                                   tpr->rx_std, tpr->rx_std_mapping);
7741                 tpr->rx_std = NULL;
7742         }
7743         if (tpr->rx_jmb) {
7744                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7745                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7746                 tpr->rx_jmb = NULL;
7747         }
7748 }
7749
7750 static int tg3_rx_prodring_init(struct tg3 *tp,
7751                                 struct tg3_rx_prodring_set *tpr)
7752 {
7753         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7754                                       GFP_KERNEL);
7755         if (!tpr->rx_std_buffers)
7756                 return -ENOMEM;
7757
7758         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7759                                          TG3_RX_STD_RING_BYTES(tp),
7760                                          &tpr->rx_std_mapping,
7761                                          GFP_KERNEL);
7762         if (!tpr->rx_std)
7763                 goto err_out;
7764
7765         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7766                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7767                                               GFP_KERNEL);
7768                 if (!tpr->rx_jmb_buffers)
7769                         goto err_out;
7770
7771                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7772                                                  TG3_RX_JMB_RING_BYTES(tp),
7773                                                  &tpr->rx_jmb_mapping,
7774                                                  GFP_KERNEL);
7775                 if (!tpr->rx_jmb)
7776                         goto err_out;
7777         }
7778
7779         return 0;
7780
7781 err_out:
7782         tg3_rx_prodring_fini(tp, tpr);
7783         return -ENOMEM;
7784 }
7785
7786 /* Free up pending packets in all rx/tx rings.
7787  *
7788  * The chip has been shut down and the driver detached from
7789  * the networking stack, so no interrupts or new tx packets will
7790  * end up in the driver.  tp->{tx,}lock is not held and we are not
7791  * in an interrupt context and thus may sleep.
7792  */
7793 static void tg3_free_rings(struct tg3 *tp)
7794 {
7795         int i, j;
7796
7797         for (j = 0; j < tp->irq_cnt; j++) {
7798                 struct tg3_napi *tnapi = &tp->napi[j];
7799
7800                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7801
7802                 if (!tnapi->tx_buffers)
7803                         continue;
7804
7805                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7806                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7807
7808                         if (!skb)
7809                                 continue;
7810
7811                         tg3_tx_skb_unmap(tnapi, i,
7812                                          skb_shinfo(skb)->nr_frags - 1);
7813
7814                         dev_kfree_skb_any(skb);
7815                 }
7816                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7817         }
7818 }
7819
7820 /* Initialize tx/rx rings for packet processing.
7821  *
7822  * The chip has been shut down and the driver detached from
7823  * the networking stack, so no interrupts or new tx packets will
7824  * end up in the driver.  tp->{tx,}lock are held and thus
7825  * we may not sleep.
7826  */
7827 static int tg3_init_rings(struct tg3 *tp)
7828 {
7829         int i;
7830
7831         /* Free up all the SKBs. */
7832         tg3_free_rings(tp);
7833
7834         for (i = 0; i < tp->irq_cnt; i++) {
7835                 struct tg3_napi *tnapi = &tp->napi[i];
7836
7837                 tnapi->last_tag = 0;
7838                 tnapi->last_irq_tag = 0;
7839                 tnapi->hw_status->status = 0;
7840                 tnapi->hw_status->status_tag = 0;
7841                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7842
7843                 tnapi->tx_prod = 0;
7844                 tnapi->tx_cons = 0;
7845                 if (tnapi->tx_ring)
7846                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7847
7848                 tnapi->rx_rcb_ptr = 0;
7849                 if (tnapi->rx_rcb)
7850                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7851
7852                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7853                         tg3_free_rings(tp);
7854                         return -ENOMEM;
7855                 }
7856         }
7857
7858         return 0;
7859 }
7860
7861 static void tg3_mem_tx_release(struct tg3 *tp)
7862 {
7863         int i;
7864
7865         for (i = 0; i < tp->irq_max; i++) {
7866                 struct tg3_napi *tnapi = &tp->napi[i];
7867
7868                 if (tnapi->tx_ring) {
7869                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7870                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7871                         tnapi->tx_ring = NULL;
7872                 }
7873
7874                 kfree(tnapi->tx_buffers);
7875                 tnapi->tx_buffers = NULL;
7876         }
7877 }
7878
7879 static int tg3_mem_tx_acquire(struct tg3 *tp)
7880 {
7881         int i;
7882         struct tg3_napi *tnapi = &tp->napi[0];
7883
7884         /* If multivector TSS is enabled, vector 0 does not handle
7885          * tx interrupts.  Don't allocate any resources for it.
7886          */
7887         if (tg3_flag(tp, ENABLE_TSS))
7888                 tnapi++;
7889
7890         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7891                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7892                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7893                 if (!tnapi->tx_buffers)
7894                         goto err_out;
7895
7896                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7897                                                     TG3_TX_RING_BYTES,
7898                                                     &tnapi->tx_desc_mapping,
7899                                                     GFP_KERNEL);
7900                 if (!tnapi->tx_ring)
7901                         goto err_out;
7902         }
7903
7904         return 0;
7905
7906 err_out:
7907         tg3_mem_tx_release(tp);
7908         return -ENOMEM;
7909 }
7910
7911 static void tg3_mem_rx_release(struct tg3 *tp)
7912 {
7913         int i;
7914
7915         for (i = 0; i < tp->irq_max; i++) {
7916                 struct tg3_napi *tnapi = &tp->napi[i];
7917
7918                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7919
7920                 if (!tnapi->rx_rcb)
7921                         continue;
7922
7923                 dma_free_coherent(&tp->pdev->dev,
7924                                   TG3_RX_RCB_RING_BYTES(tp),
7925                                   tnapi->rx_rcb,
7926                                   tnapi->rx_rcb_mapping);
7927                 tnapi->rx_rcb = NULL;
7928         }
7929 }
7930
7931 static int tg3_mem_rx_acquire(struct tg3 *tp)
7932 {
7933         unsigned int i, limit;
7934
7935         limit = tp->rxq_cnt;
7936
7937         /* If RSS is enabled, we need a (dummy) producer ring
7938          * set on vector zero.  This is the true hw prodring.
7939          */
7940         if (tg3_flag(tp, ENABLE_RSS))
7941                 limit++;
7942
7943         for (i = 0; i < limit; i++) {
7944                 struct tg3_napi *tnapi = &tp->napi[i];
7945
7946                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7947                         goto err_out;
7948
7949                 /* If multivector RSS is enabled, vector 0
7950                  * does not handle rx or tx interrupts.
7951                  * Don't allocate any resources for it.
7952                  */
7953                 if (!i && tg3_flag(tp, ENABLE_RSS))
7954                         continue;
7955
7956                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7957                                                    TG3_RX_RCB_RING_BYTES(tp),
7958                                                    &tnapi->rx_rcb_mapping,
7959                                                    GFP_KERNEL);
7960                 if (!tnapi->rx_rcb)
7961                         goto err_out;
7962
7963                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7964         }
7965
7966         return 0;
7967
7968 err_out:
7969         tg3_mem_rx_release(tp);
7970         return -ENOMEM;
7971 }
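
/*
 * Illustrative layout (assuming a 4-queue RSS configuration): limit is
 * 5, vector 0 gets only a producer ring set (the true hw prodring per
 * the comment above) and no rx_rcb, while vectors 1-4 each get a
 * prodring plus an rx return ring.
 */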
7972
7973 /*
7974  * Must not be invoked with interrupt sources disabled and
7975  * the hardware shutdown down.
7976  * the hardware shut down.
7977 static void tg3_free_consistent(struct tg3 *tp)
7978 {
7979         int i;
7980
7981         for (i = 0; i < tp->irq_cnt; i++) {
7982                 struct tg3_napi *tnapi = &tp->napi[i];
7983
7984                 if (tnapi->hw_status) {
7985                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7986                                           tnapi->hw_status,
7987                                           tnapi->status_mapping);
7988                         tnapi->hw_status = NULL;
7989                 }
7990         }
7991
7992         tg3_mem_rx_release(tp);
7993         tg3_mem_tx_release(tp);
7994
7995         if (tp->hw_stats) {
7996                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7997                                   tp->hw_stats, tp->stats_mapping);
7998                 tp->hw_stats = NULL;
7999         }
8000 }
8001
8002 /*
8003  * Must not be invoked with interrupt sources disabled and
8004  * the hardware shut down.  Can sleep.
8005  */
8006 static int tg3_alloc_consistent(struct tg3 *tp)
8007 {
8008         int i;
8009
8010         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8011                                           sizeof(struct tg3_hw_stats),
8012                                           &tp->stats_mapping,
8013                                           GFP_KERNEL);
8014         if (!tp->hw_stats)
8015                 goto err_out;
8016
8017         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8018
8019         for (i = 0; i < tp->irq_cnt; i++) {
8020                 struct tg3_napi *tnapi = &tp->napi[i];
8021                 struct tg3_hw_status *sblk;
8022
8023                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8024                                                       TG3_HW_STATUS_SIZE,
8025                                                       &tnapi->status_mapping,
8026                                                       GFP_KERNEL);
8027                 if (!tnapi->hw_status)
8028                         goto err_out;
8029
8030                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8031                 sblk = tnapi->hw_status;
8032
8033                 if (tg3_flag(tp, ENABLE_RSS)) {
8034                         u16 *prodptr = NULL;
8035
8036                         /*
8037                          * When RSS is enabled, the status block format changes
8038                          * slightly.  The "rx_jumbo_consumer", "reserved",
8039                          * and "rx_mini_consumer" members get mapped to the
8040                          * other three rx return ring producer indexes.
8041                          */
8042                         switch (i) {
8043                         case 1:
8044                                 prodptr = &sblk->idx[0].rx_producer;
8045                                 break;
8046                         case 2:
8047                                 prodptr = &sblk->rx_jumbo_consumer;
8048                                 break;
8049                         case 3:
8050                                 prodptr = &sblk->reserved;
8051                                 break;
8052                         case 4:
8053                                 prodptr = &sblk->rx_mini_consumer;
8054                                 break;
8055                         }
8056                         tnapi->rx_rcb_prod_idx = prodptr;
8057                 } else {
8058                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8059                 }
8060         }
8061
8062         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8063                 goto err_out;
8064
8065         return 0;
8066
8067 err_out:
8068         tg3_free_consistent(tp);
8069         return -ENOMEM;
8070 }
8071
8072 #define MAX_WAIT_CNT 1000
8073
8074 /* To stop a block, clear the enable bit and poll until it
8075  * clears.  tp->lock is held.
8076  */
8077 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
8078 {
8079         unsigned int i;
8080         u32 val;
8081
8082         if (tg3_flag(tp, 5705_PLUS)) {
8083                 switch (ofs) {
8084                 case RCVLSC_MODE:
8085                 case DMAC_MODE:
8086                 case MBFREE_MODE:
8087                 case BUFMGR_MODE:
8088                 case MEMARB_MODE:
8089                         /* We can't enable/disable these bits on the
8090                          * 5705/5750, so just report success.
8091                          */
8092                         return 0;
8093
8094                 default:
8095                         break;
8096                 }
8097         }
8098
8099         val = tr32(ofs);
8100         val &= ~enable_bit;
8101         tw32_f(ofs, val);
8102
8103         for (i = 0; i < MAX_WAIT_CNT; i++) {
8104                 udelay(100);
8105                 val = tr32(ofs);
8106                 if ((val & enable_bit) == 0)
8107                         break;
8108         }
8109
8110         if (i == MAX_WAIT_CNT && !silent) {
8111                 dev_err(&tp->pdev->dev,
8112                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8113                         ofs, enable_bit);
8114                 return -ENODEV;
8115         }
8116
8117         return 0;
8118 }
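
/*
 * The poll above bounds the wait at MAX_WAIT_CNT * 100us, i.e. roughly
 * 100ms per block, before reporting -ENODEV.  For example,
 *
 *	tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
 *
 * spins on tr32(RCVBDI_MODE) until RCVBDI_MODE_ENABLE reads back as
 * zero or the budget is exhausted.
 */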
8119
8120 /* tp->lock is held. */
8121 static int tg3_abort_hw(struct tg3 *tp, int silent)
8122 {
8123         int i, err;
8124
8125         tg3_disable_ints(tp);
8126
8127         tp->rx_mode &= ~RX_MODE_ENABLE;
8128         tw32_f(MAC_RX_MODE, tp->rx_mode);
8129         udelay(10);
8130
8131         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8132         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8133         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8134         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8135         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8136         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8137
8138         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8139         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8140         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8141         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8142         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8143         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8144         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8145
8146         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8147         tw32_f(MAC_MODE, tp->mac_mode);
8148         udelay(40);
8149
8150         tp->tx_mode &= ~TX_MODE_ENABLE;
8151         tw32_f(MAC_TX_MODE, tp->tx_mode);
8152
8153         for (i = 0; i < MAX_WAIT_CNT; i++) {
8154                 udelay(100);
8155                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8156                         break;
8157         }
8158         if (i >= MAX_WAIT_CNT) {
8159                 dev_err(&tp->pdev->dev,
8160                         "%s timed out, TX_MODE_ENABLE will not clear "
8161                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8162                 err |= -ENODEV;
8163         }
8164
8165         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8166         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8167         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8168
8169         tw32(FTQ_RESET, 0xffffffff);
8170         tw32(FTQ_RESET, 0x00000000);
8171
8172         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8173         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8174
8175         for (i = 0; i < tp->irq_cnt; i++) {
8176                 struct tg3_napi *tnapi = &tp->napi[i];
8177                 if (tnapi->hw_status)
8178                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8179         }
8180
8181         return err;
8182 }
8183
8184 /* Save PCI command register before chip reset */
8185 static void tg3_save_pci_state(struct tg3 *tp)
8186 {
8187         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8188 }
8189
8190 /* Restore PCI state after chip reset */
8191 static void tg3_restore_pci_state(struct tg3 *tp)
8192 {
8193         u32 val;
8194
8195         /* Re-enable indirect register accesses. */
8196         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8197                                tp->misc_host_ctrl);
8198
8199         /* Set MAX PCI retry to zero. */
8200         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8201         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8202             tg3_flag(tp, PCIX_MODE))
8203                 val |= PCISTATE_RETRY_SAME_DMA;
8204         /* Allow reads and writes to the APE register and memory space. */
8205         if (tg3_flag(tp, ENABLE_APE))
8206                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8207                        PCISTATE_ALLOW_APE_SHMEM_WR |
8208                        PCISTATE_ALLOW_APE_PSPACE_WR;
8209         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8210
8211         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8212
8213         if (!tg3_flag(tp, PCI_EXPRESS)) {
8214                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8215                                       tp->pci_cacheline_sz);
8216                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8217                                       tp->pci_lat_timer);
8218         }
8219
8220         /* Make sure PCI-X relaxed ordering bit is clear. */
8221         if (tg3_flag(tp, PCIX_MODE)) {
8222                 u16 pcix_cmd;
8223
8224                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8225                                      &pcix_cmd);
8226                 pcix_cmd &= ~PCI_X_CMD_ERO;
8227                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8228                                       pcix_cmd);
8229         }
8230
8231         if (tg3_flag(tp, 5780_CLASS)) {
8232
8233                 /* Chip reset on 5780 will reset the MSI enable bit,
8234                  * so we need to restore it.
8235                  */
8236                 if (tg3_flag(tp, USING_MSI)) {
8237                         u16 ctrl;
8238
8239                         pci_read_config_word(tp->pdev,
8240                                              tp->msi_cap + PCI_MSI_FLAGS,
8241                                              &ctrl);
8242                         pci_write_config_word(tp->pdev,
8243                                               tp->msi_cap + PCI_MSI_FLAGS,
8244                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8245                         val = tr32(MSGINT_MODE);
8246                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8247                 }
8248         }
8249 }
8250
8251 /* tp->lock is held. */
8252 static int tg3_chip_reset(struct tg3 *tp)
8253 {
8254         u32 val;
8255         void (*write_op)(struct tg3 *, u32, u32);
8256         int i, err;
8257
8258         tg3_nvram_lock(tp);
8259
8260         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8261
8262         /* No matching tg3_nvram_unlock() after this because
8263          * chip reset below will undo the nvram lock.
8264          */
8265         tp->nvram_lock_cnt = 0;
8266
8267         /* GRC_MISC_CFG core clock reset will clear the memory
8268          * enable bit in PCI register 4 and the MSI enable bit
8269          * on some chips, so we save relevant registers here.
8270          */
8271         tg3_save_pci_state(tp);
8272
8273         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8274             tg3_flag(tp, 5755_PLUS))
8275                 tw32(GRC_FASTBOOT_PC, 0);
8276
8277         /*
8278          * We must avoid the readl() that normally takes place.
8279          * It can lock up machines, cause machine checks, and do other
8280          * fun things.  So, temporarily disable the 5701
8281          * hardware workaround while we do the reset.
8282          */
8283         write_op = tp->write32;
8284         if (write_op == tg3_write_flush_reg32)
8285                 tp->write32 = tg3_write32;
8286
8287         /* Prevent the irq handler from reading or writing PCI registers
8288          * during chip reset when the memory enable bit in the PCI command
8289          * register may be cleared.  The chip does not generate interrupts
8290          * at this time, but the irq handler may still be called due to irq
8291          * sharing or irqpoll.
8292          */
8293         tg3_flag_set(tp, CHIP_RESETTING);
8294         for (i = 0; i < tp->irq_cnt; i++) {
8295                 struct tg3_napi *tnapi = &tp->napi[i];
8296                 if (tnapi->hw_status) {
8297                         tnapi->hw_status->status = 0;
8298                         tnapi->hw_status->status_tag = 0;
8299                 }
8300                 tnapi->last_tag = 0;
8301                 tnapi->last_irq_tag = 0;
8302         }
8303         smp_mb();
8304
8305         for (i = 0; i < tp->irq_cnt; i++)
8306                 synchronize_irq(tp->napi[i].irq_vec);
8307
8308         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8309                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8310                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8311         }
8312
8313         /* do the reset */
8314         val = GRC_MISC_CFG_CORECLK_RESET;
8315
8316         if (tg3_flag(tp, PCI_EXPRESS)) {
8317                 /* Force PCIe 1.0a mode */
8318                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8319                     !tg3_flag(tp, 57765_PLUS) &&
8320                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8321                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8322                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8323
8324                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8325                         tw32(GRC_MISC_CFG, (1 << 29));
8326                         val |= (1 << 29);
8327                 }
8328         }
8329
8330         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8331                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8332                 tw32(GRC_VCPU_EXT_CTRL,
8333                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8334         }
8335
8336         /* Manage gphy power for all CPMU-absent PCIe devices. */
8337         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8338                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8339
8340         tw32(GRC_MISC_CFG, val);
8341
8342         /* restore 5701 hardware bug workaround write method */
8343         tp->write32 = write_op;
8344
8345         /* Unfortunately, we have to delay before the PCI read back.
8346          * Some 575X chips will not even respond to a PCI cfg access
8347          * when the reset command is given to the chip.
8348          *
8349          * How do these hardware designers expect things to work
8350          * properly if the PCI write is posted for a long period
8351          * of time?  It is always necessary to have some method by
8352          * which a register read back can occur to push the write
8353          * out which does the reset.
8354          *
8355          * For most tg3 variants the trick below works.
8356          * Ho hum...
8357          */
8358         udelay(120);
8359
8360         /* Flush PCI posted writes.  The normal MMIO registers
8361          * are inaccessible at this time so this is the only
8362          * way to do this reliably (actually, this is no longer
8363          * the case, see above).  I tried to use indirect
8364          * register read/write but this upset some 5701 variants.
8365          */
8366         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8367
8368         udelay(120);
8369
8370         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8371                 u16 val16;
8372
8373                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8374                         int j;
8375                         u32 cfg_val;
8376
8377                         /* Wait for link training to complete.  */
8378                         for (j = 0; j < 5000; j++)
8379                                 udelay(100);
8380
8381                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8382                         pci_write_config_dword(tp->pdev, 0xc4,
8383                                                cfg_val | (1 << 15));
8384                 }
8385
8386                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8387                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8388                 /*
8389                  * Older PCIe devices only support the 128-byte
8390                  * MPS setting.  Enforce the restriction.
8391                  */
8392                 if (!tg3_flag(tp, CPMU_PRESENT))
8393                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8394                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8395
8396                 /* Clear error status */
8397                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8398                                       PCI_EXP_DEVSTA_CED |
8399                                       PCI_EXP_DEVSTA_NFED |
8400                                       PCI_EXP_DEVSTA_FED |
8401                                       PCI_EXP_DEVSTA_URD);
8402         }
8403
8404         tg3_restore_pci_state(tp);
8405
8406         tg3_flag_clear(tp, CHIP_RESETTING);
8407         tg3_flag_clear(tp, ERROR_PROCESSED);
8408
8409         val = 0;
8410         if (tg3_flag(tp, 5780_CLASS))
8411                 val = tr32(MEMARB_MODE);
8412         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8413
8414         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8415                 tg3_stop_fw(tp);
8416                 tw32(0x5000, 0x400);
8417         }
8418
8419         tw32(GRC_MODE, tp->grc_mode);
8420
8421         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8422                 val = tr32(0xc4);
8423
8424                 tw32(0xc4, val | (1 << 15));
8425         }
8426
8427         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8428             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8429                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8430                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8431                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8432                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8433         }
8434
8435         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8436                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8437                 val = tp->mac_mode;
8438         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8439                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8440                 val = tp->mac_mode;
8441         } else
8442                 val = 0;
8443
8444         tw32_f(MAC_MODE, val);
8445         udelay(40);
8446
8447         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8448
8449         err = tg3_poll_fw(tp);
8450         if (err)
8451                 return err;
8452
8453         tg3_mdio_start(tp);
8454
8455         if (tg3_flag(tp, PCI_EXPRESS) &&
8456             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8457             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8458             !tg3_flag(tp, 57765_PLUS)) {
8459                 val = tr32(0x7c00);
8460
8461                 tw32(0x7c00, val | (1 << 25));
8462         }
8463
8464         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8465                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8466                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8467         }
8468
8469         /* Reprobe ASF enable state.  */
8470         tg3_flag_clear(tp, ENABLE_ASF);
8471         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8472         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8473         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8474                 u32 nic_cfg;
8475
8476                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8477                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8478                         tg3_flag_set(tp, ENABLE_ASF);
8479                         tp->last_event_jiffies = jiffies;
8480                         if (tg3_flag(tp, 5750_PLUS))
8481                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8482                 }
8483         }
8484
8485         return 0;
8486 }
8487
8488 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8489 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8490
8491 /* tp->lock is held. */
8492 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8493 {
8494         int err;
8495
8496         tg3_stop_fw(tp);
8497
8498         tg3_write_sig_pre_reset(tp, kind);
8499
8500         tg3_abort_hw(tp, silent);
8501         err = tg3_chip_reset(tp);
8502
8503         __tg3_set_mac_addr(tp, 0);
8504
8505         tg3_write_sig_legacy(tp, kind);
8506         tg3_write_sig_post_reset(tp, kind);
8507
8508         if (tp->hw_stats) {
8509                 /* Save the stats across chip resets... */
8510                 tg3_get_nstats(tp, &tp->net_stats_prev);
8511                 tg3_get_estats(tp, &tp->estats_prev);
8512
8513                 /* And make sure the next sample is new data */
8514                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8515         }
8516
8517         return err;
8521 }
8522
8523 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8524 {
8525         struct tg3 *tp = netdev_priv(dev);
8526         struct sockaddr *addr = p;
8527         int err = 0, skip_mac_1 = 0;
8528
8529         if (!is_valid_ether_addr(addr->sa_data))
8530                 return -EADDRNOTAVAIL;
8531
8532         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8533
8534         if (!netif_running(dev))
8535                 return 0;
8536
8537         if (tg3_flag(tp, ENABLE_ASF)) {
8538                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8539
8540                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8541                 addr0_low = tr32(MAC_ADDR_0_LOW);
8542                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8543                 addr1_low = tr32(MAC_ADDR_1_LOW);
8544
8545                 /* Skip MAC addr 1 if ASF is using it. */
8546                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8547                     !(addr1_high == 0 && addr1_low == 0))
8548                         skip_mac_1 = 1;
8549         }
8550         spin_lock_bh(&tp->lock);
8551         __tg3_set_mac_addr(tp, skip_mac_1);
8552         spin_unlock_bh(&tp->lock);
8553
8554         return err;
8555 }
8556
8557 /* tp->lock is held. */
8558 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8559                            dma_addr_t mapping, u32 maxlen_flags,
8560                            u32 nic_addr)
8561 {
8562         tg3_write_mem(tp,
8563                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8564                       ((u64) mapping >> 32));
8565         tg3_write_mem(tp,
8566                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8567                       ((u64) mapping & 0xffffffff));
8568         tg3_write_mem(tp,
8569                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8570                        maxlen_flags);
8571
8572         if (!tg3_flag(tp, 5705_PLUS))
8573                 tg3_write_mem(tp,
8574                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8575                               nic_addr);
8576 }
8577
8578
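/* Program the TX host-coalescing parameters.  Without TSS only the
 * global registers are used; with TSS the globals are zeroed and each
 * TX queue gets its own per-vector register block, 0x18 bytes apart.
 * Any remaining per-vector blocks are zeroed.
 */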
8579 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8580 {
8581         int i = 0;
8582
8583         if (!tg3_flag(tp, ENABLE_TSS)) {
8584                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8585                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8586                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8587         } else {
8588                 tw32(HOSTCC_TXCOL_TICKS, 0);
8589                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8590                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8591
8592                 for (; i < tp->txq_cnt; i++) {
8593                         u32 reg;
8594
8595                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8596                         tw32(reg, ec->tx_coalesce_usecs);
8597                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8598                         tw32(reg, ec->tx_max_coalesced_frames);
8599                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8600                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8601                 }
8602         }
8603
8604         for (; i < tp->irq_max - 1; i++) {
8605                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8606                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8607                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8608         }
8609 }
8610
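/* RX counterpart of tg3_coal_tx_init(): with RSS the global registers
 * are zeroed and the per-vector register blocks are programmed
 * instead.
 */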
8611 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8612 {
8613         int i = 0;
8614         u32 limit = tp->rxq_cnt;
8615
8616         if (!tg3_flag(tp, ENABLE_RSS)) {
8617                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8618                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8619                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8620                 limit--;
8621         } else {
8622                 tw32(HOSTCC_RXCOL_TICKS, 0);
8623                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8624                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8625         }
8626
8627         for (; i < limit; i++) {
8628                 u32 reg;
8629
8630                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8631                 tw32(reg, ec->rx_coalesce_usecs);
8632                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8633                 tw32(reg, ec->rx_max_coalesced_frames);
8634                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8635                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8636         }
8637
8638         for (; i < tp->irq_max - 1; i++) {
8639                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8640                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8641                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8642         }
8643 }
8644
8645 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8646 {
8647         tg3_coal_tx_init(tp, ec);
8648         tg3_coal_rx_init(tp, ec);
8649
8650         if (!tg3_flag(tp, 5705_PLUS)) {
8651                 u32 val = ec->stats_block_coalesce_usecs;
8652
8653                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8654                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8655
8656                 if (!tp->link_up)
8657                         val = 0;
8658
8659                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8660         }
8661 }
8662
8663 /* tp->lock is held. */
8664 static void tg3_rings_reset(struct tg3 *tp)
8665 {
8666         int i;
8667         u32 stblk, txrcb, rxrcb, limit;
8668         struct tg3_napi *tnapi = &tp->napi[0];
8669
8670         /* Disable all transmit rings but the first. */
8671         if (!tg3_flag(tp, 5705_PLUS))
8672                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8673         else if (tg3_flag(tp, 5717_PLUS))
8674                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8675         else if (tg3_flag(tp, 57765_CLASS))
8676                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8677         else
8678                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8679
8680         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8681              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8682                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8683                               BDINFO_FLAGS_DISABLED);
8684
8685
8686         /* Disable all receive return rings but the first. */
8687         if (tg3_flag(tp, 5717_PLUS))
8688                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8689         else if (!tg3_flag(tp, 5705_PLUS))
8690                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8691         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8692                  tg3_flag(tp, 57765_CLASS))
8693                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8694         else
8695                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8696
8697         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8698              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8699                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8700                               BDINFO_FLAGS_DISABLED);
8701
8702         /* Disable interrupts */
8703         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8704         tp->napi[0].chk_msi_cnt = 0;
8705         tp->napi[0].last_rx_cons = 0;
8706         tp->napi[0].last_tx_cons = 0;
8707
8708         /* Zero mailbox registers. */
8709         if (tg3_flag(tp, SUPPORT_MSIX)) {
8710                 for (i = 1; i < tp->irq_max; i++) {
8711                         tp->napi[i].tx_prod = 0;
8712                         tp->napi[i].tx_cons = 0;
8713                         if (tg3_flag(tp, ENABLE_TSS))
8714                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8715                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8716                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8717                         tp->napi[i].chk_msi_cnt = 0;
8718                         tp->napi[i].last_rx_cons = 0;
8719                         tp->napi[i].last_tx_cons = 0;
8720                 }
8721                 if (!tg3_flag(tp, ENABLE_TSS))
8722                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8723         } else {
8724                 tp->napi[0].tx_prod = 0;
8725                 tp->napi[0].tx_cons = 0;
8726                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8727                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8728         }
8729
8730         /* Make sure the NIC-based send BD rings are disabled. */
8731         if (!tg3_flag(tp, 5705_PLUS)) {
8732                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8733                 for (i = 0; i < 16; i++)
8734                         tw32_tx_mbox(mbox + i * 8, 0);
8735         }
8736
8737         txrcb = NIC_SRAM_SEND_RCB;
8738         rxrcb = NIC_SRAM_RCV_RET_RCB;
8739
8740         /* Clear status block in ram. */
8741         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8742
8743         /* Set status block DMA address */
8744         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8745              ((u64) tnapi->status_mapping >> 32));
8746         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8747              ((u64) tnapi->status_mapping & 0xffffffff));
8748
8749         if (tnapi->tx_ring) {
8750                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8751                                (TG3_TX_RING_SIZE <<
8752                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8753                                NIC_SRAM_TX_BUFFER_DESC);
8754                 txrcb += TG3_BDINFO_SIZE;
8755         }
8756
8757         if (tnapi->rx_rcb) {
8758                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8759                                (tp->rx_ret_ring_mask + 1) <<
8760                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8761                 rxrcb += TG3_BDINFO_SIZE;
8762         }
8763
8764         stblk = HOSTCC_STATBLCK_RING1;
8765
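        /* Point each additional vector's status block at its own DMA
         * area and program its send and receive-return ring control
         * blocks.
         */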
8766         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8767                 u64 mapping = (u64)tnapi->status_mapping;
8768                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8769                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8770
8771                 /* Clear status block in ram. */
8772                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8773
8774                 if (tnapi->tx_ring) {
8775                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8776                                        (TG3_TX_RING_SIZE <<
8777                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8778                                        NIC_SRAM_TX_BUFFER_DESC);
8779                         txrcb += TG3_BDINFO_SIZE;
8780                 }
8781
8782                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8783                                ((tp->rx_ret_ring_mask + 1) <<
8784                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8785
8786                 stblk += 8;
8787                 rxrcb += TG3_BDINFO_SIZE;
8788         }
8789 }
8790
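/* Program the RX buffer-descriptor replenish thresholds.  The limits
 * are derived from the per-chip BD cache size and from the configured
 * ring sizes (rx_pending and rx_jumbo_pending).
 */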
8791 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8792 {
8793         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8794
8795         if (!tg3_flag(tp, 5750_PLUS) ||
8796             tg3_flag(tp, 5780_CLASS) ||
8797             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8798             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8799             tg3_flag(tp, 57765_PLUS))
8800                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8801         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8802                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8803                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8804         else
8805                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8806
8807         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8808         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8809
8810         val = min(nic_rep_thresh, host_rep_thresh);
8811         tw32(RCVBDI_STD_THRESH, val);
8812
8813         if (tg3_flag(tp, 57765_PLUS))
8814                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8815
8816         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8817                 return;
8818
8819         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8820
8821         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8822
8823         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8824         tw32(RCVBDI_JUMBO_THRESH, val);
8825
8826         if (tg3_flag(tp, 57765_PLUS))
8827                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8828 }
8829
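/* Bit-reflected CRC-32 (polynomial 0xedb88320), computed one byte at
 * a time.  The multicast hash filter below uses seven bits of the
 * result to select one of 128 filter bits.
 */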
8830 static inline u32 calc_crc(unsigned char *buf, int len)
8831 {
8832         u32 reg;
8833         u32 tmp;
8834         int j, k;
8835
8836         reg = 0xffffffff;
8837
8838         for (j = 0; j < len; j++) {
8839                 reg ^= buf[j];
8840
8841                 for (k = 0; k < 8; k++) {
8842                         tmp = reg & 0x01;
8843
8844                         reg >>= 1;
8845
8846                         if (tmp)
8847                                 reg ^= 0xedb88320;
8848                 }
8849         }
8850
8851         return ~reg;
8852 }
8853
8854 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8855 {
8856         /* Accept or reject all multicast frames. */
8857         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8858         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8859         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8860         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8861 }
8862
8863 static void __tg3_set_rx_mode(struct net_device *dev)
8864 {
8865         struct tg3 *tp = netdev_priv(dev);
8866         u32 rx_mode;
8867
8868         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8869                                   RX_MODE_KEEP_VLAN_TAG);
8870
8871 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8872         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8873          * flag clear.
8874          */
8875         if (!tg3_flag(tp, ENABLE_ASF))
8876                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8877 #endif
8878
8879         if (dev->flags & IFF_PROMISC) {
8880                 /* Promiscuous mode. */
8881                 rx_mode |= RX_MODE_PROMISC;
8882         } else if (dev->flags & IFF_ALLMULTI) {
8883                 /* Accept all multicast. */
8884                 tg3_set_multi(tp, 1);
8885         } else if (netdev_mc_empty(dev)) {
8886                 /* Reject all multicast. */
8887                 tg3_set_multi(tp, 0);
8888         } else {
8889                 /* Accept one or more multicast addresses. */
8890                 struct netdev_hw_addr *ha;
8891                 u32 mc_filter[4] = { 0, };
8892                 u32 regidx;
8893                 u32 bit;
8894                 u32 crc;
8895
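                /* Hash each address into one of 128 filter bits:
                 * seven bits of the inverted CRC select the bit, and
                 * the top two of those pick one of the four hash
                 * registers.
                 */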
8896                 netdev_for_each_mc_addr(ha, dev) {
8897                         crc = calc_crc(ha->addr, ETH_ALEN);
8898                         bit = ~crc & 0x7f;
8899                         regidx = (bit & 0x60) >> 5;
8900                         bit &= 0x1f;
8901                         mc_filter[regidx] |= (1 << bit);
8902                 }
8903
8904                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8905                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8906                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8907                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8908         }
8909
8910         if (rx_mode != tp->rx_mode) {
8911                 tp->rx_mode = rx_mode;
8912                 tw32_f(MAC_RX_MODE, rx_mode);
8913                 udelay(10);
8914         }
8915 }
8916
8917 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8918 {
8919         int i;
8920
8921         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8922                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8923 }
8924
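/* Make sure every indirection-table entry references an RX queue that
 * actually exists; if any entry is out of range, rebuild the default
 * table.
 */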
8925 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8926 {
8927         int i;
8928
8929         if (!tg3_flag(tp, SUPPORT_MSIX))
8930                 return;
8931
8932         if (tp->rxq_cnt == 1) {
8933                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8934                 return;
8935         }
8936
8937         /* Validate table against current IRQ count */
8938         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8939                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
8940                         break;
8941         }
8942
8943         if (i != TG3_RSS_INDIR_TBL_SIZE)
8944                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8945 }
8946
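/* Write the RSS indirection table to the hardware.  Entries are four
 * bits wide, packed eight per 32-bit register with the first entry in
 * the most-significant nibble.
 */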
8947 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8948 {
8949         int i = 0;
8950         u32 reg = MAC_RSS_INDIR_TBL_0;
8951
8952         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8953                 u32 val = tp->rss_ind_tbl[i];
8954                 i++;
8955                 for (; i % 8; i++) {
8956                         val <<= 4;
8957                         val |= tp->rss_ind_tbl[i];
8958                 }
8959                 tw32(reg, val);
8960                 reg += 4;
8961         }
8962 }
8963
8964 /* tp->lock is held. */
8965 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8966 {
8967         u32 val, rdmac_mode;
8968         int i, err, limit;
8969         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8970
8971         tg3_disable_ints(tp);
8972
8973         tg3_stop_fw(tp);
8974
8975         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8976
8977         if (tg3_flag(tp, INIT_COMPLETE))
8978                 tg3_abort_hw(tp, 1);
8979
8980         /* Enable MAC control of LPI */
8981         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8982                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8983                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8984                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8985
8986                 tw32_f(TG3_CPMU_EEE_CTRL,
8987                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8988
8989                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8990                       TG3_CPMU_EEEMD_LPI_IN_TX |
8991                       TG3_CPMU_EEEMD_LPI_IN_RX |
8992                       TG3_CPMU_EEEMD_EEE_ENABLE;
8993
8994                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8995                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8996
8997                 if (tg3_flag(tp, ENABLE_APE))
8998                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8999
9000                 tw32_f(TG3_CPMU_EEE_MODE, val);
9001
9002                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9003                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9004                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9005
9006                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9007                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9008                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9009         }
9010
9011         if (reset_phy)
9012                 tg3_phy_reset(tp);
9013
9014         err = tg3_chip_reset(tp);
9015         if (err)
9016                 return err;
9017
9018         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9019
9020         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
9021                 val = tr32(TG3_CPMU_CTRL);
9022                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9023                 tw32(TG3_CPMU_CTRL, val);
9024
9025                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9026                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9027                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9028                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9029
9030                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9031                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9032                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9033                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9034
9035                 val = tr32(TG3_CPMU_HST_ACC);
9036                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9037                 val |= CPMU_HST_ACC_MACCLK_6_25;
9038                 tw32(TG3_CPMU_HST_ACC, val);
9039         }
9040
9041         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
9042                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9043                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9044                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9045                 tw32(PCIE_PWR_MGMT_THRESH, val);
9046
9047                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9048                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9049
9050                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9051
9052                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9053                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9054         }
9055
9056         if (tg3_flag(tp, L1PLLPD_EN)) {
9057                 u32 grc_mode = tr32(GRC_MODE);
9058
9059                 /* Access the lower 1K of PL PCIE block registers. */
9060                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9061                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9062
9063                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9064                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9065                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9066
9067                 tw32(GRC_MODE, grc_mode);
9068         }
9069
9070         if (tg3_flag(tp, 57765_CLASS)) {
9071                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
9072                         u32 grc_mode = tr32(GRC_MODE);
9073
9074                         /* Access the lower 1K of PL PCIE block registers. */
9075                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9076                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9077
9078                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9079                                    TG3_PCIE_PL_LO_PHYCTL5);
9080                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9081                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9082
9083                         tw32(GRC_MODE, grc_mode);
9084                 }
9085
9086                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
9087                         u32 grc_mode = tr32(GRC_MODE);
9088
9089                         /* Access the lower 1K of DL PCIE block registers. */
9090                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9091                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9092
9093                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9094                                    TG3_PCIE_DL_LO_FTSMAX);
9095                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9096                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9097                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9098
9099                         tw32(GRC_MODE, grc_mode);
9100                 }
9101
9102                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9103                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9104                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9105                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9106         }
9107
9108         /* This works around an issue with Athlon chipsets on
9109          * B3 tigon3 silicon.  The bit has no effect on any other
9110          * revision, but do not set it on PCI Express chips and do
9111          * not touch the clocks at all if the CPMU is present.
9112          */
9113         if (!tg3_flag(tp, CPMU_PRESENT)) {
9114                 if (!tg3_flag(tp, PCI_EXPRESS))
9115                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9116                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9117         }
9118
9119         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
9120             tg3_flag(tp, PCIX_MODE)) {
9121                 val = tr32(TG3PCI_PCISTATE);
9122                 val |= PCISTATE_RETRY_SAME_DMA;
9123                 tw32(TG3PCI_PCISTATE, val);
9124         }
9125
9126         if (tg3_flag(tp, ENABLE_APE)) {
9127                 /* Allow reads and writes to the
9128                  * APE register and memory space.
9129                  */
9130                 val = tr32(TG3PCI_PCISTATE);
9131                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9132                        PCISTATE_ALLOW_APE_SHMEM_WR |
9133                        PCISTATE_ALLOW_APE_PSPACE_WR;
9134                 tw32(TG3PCI_PCISTATE, val);
9135         }
9136
9137         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
9138                 /* Enable some hw fixes.  */
9139                 val = tr32(TG3PCI_MSI_DATA);
9140                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9141                 tw32(TG3PCI_MSI_DATA, val);
9142         }
9143
9144         /* Descriptor ring init may make accesses to the
9145          * NIC SRAM area to set up the TX descriptors, so we
9146          * can only do this after the hardware has been
9147          * successfully reset.
9148          */
9149         err = tg3_init_rings(tp);
9150         if (err)
9151                 return err;
9152
9153         if (tg3_flag(tp, 57765_PLUS)) {
9154                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9155                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9156                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
9157                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9158                 if (!tg3_flag(tp, 57765_CLASS) &&
9159                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
9160                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9161                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9162         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
9163                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
9164                 /* This value is determined during the probe-time DMA
9165                  * engine test, tg3_test_dma().
9166                  */
9167                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9168         }
9169
9170         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9171                           GRC_MODE_4X_NIC_SEND_RINGS |
9172                           GRC_MODE_NO_TX_PHDR_CSUM |
9173                           GRC_MODE_NO_RX_PHDR_CSUM);
9174         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9175
9176         /* Pseudo-header checksum is done by hardware logic and not
9177          * the offload processors, so make the chip do the pseudo-
9178          * header checksums on receive.  For transmit it is more
9179          * convenient to do the pseudo-header checksum in software
9180          * as Linux does that on transmit for us in all cases.
9181          */
9182         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9183
9184         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9185         if (tp->rxptpctl)
9186                 tw32(TG3_RX_PTP_CTL,
9187                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9188
9189         if (tg3_flag(tp, PTP_CAPABLE))
9190                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9191
9192         tw32(GRC_MODE, tp->grc_mode | val);
9193
9194         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
9195         val = tr32(GRC_MISC_CFG);
9196         val &= ~0xff;
9197         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9198         tw32(GRC_MISC_CFG, val);
9199
9200         /* Initialize MBUF/DESC pool. */
9201         if (tg3_flag(tp, 5750_PLUS)) {
9202                 /* Do nothing.  */
9203         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
9204                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9205                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9206                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9207                 else
9208                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9209                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9210                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9211         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9212                 int fw_len;
9213
9214                 fw_len = tp->fw_len;
9215                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9216                 tw32(BUFMGR_MB_POOL_ADDR,
9217                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9218                 tw32(BUFMGR_MB_POOL_SIZE,
9219                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9220         }
9221
9222         if (tp->dev->mtu <= ETH_DATA_LEN) {
9223                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9224                      tp->bufmgr_config.mbuf_read_dma_low_water);
9225                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9226                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9227                 tw32(BUFMGR_MB_HIGH_WATER,
9228                      tp->bufmgr_config.mbuf_high_water);
9229         } else {
9230                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9231                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9232                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9233                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9234                 tw32(BUFMGR_MB_HIGH_WATER,
9235                      tp->bufmgr_config.mbuf_high_water_jumbo);
9236         }
9237         tw32(BUFMGR_DMA_LOW_WATER,
9238              tp->bufmgr_config.dma_low_water);
9239         tw32(BUFMGR_DMA_HIGH_WATER,
9240              tp->bufmgr_config.dma_high_water);
9241
9242         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9243         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
9244                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9245         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9246             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9247             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9248                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9249         tw32(BUFMGR_MODE, val);
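        /* Poll up to ~20 ms (2000 x 10 us) for the buffer manager to
         * report itself enabled.
         */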
9250         for (i = 0; i < 2000; i++) {
9251                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9252                         break;
9253                 udelay(10);
9254         }
9255         if (i >= 2000) {
9256                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9257                 return -ENODEV;
9258         }
9259
9260         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9261                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9262
9263         tg3_setup_rxbd_thresholds(tp);
9264
9265         /* Initialize TG3_BDINFO's at:
9266          *  RCVDBDI_STD_BD:     standard eth size rx ring
9267          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9268          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9269          *
9270          * like so:
9271          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9272          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9273          *                              ring attribute flags
9274          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9275          *
9276          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9277          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9278          *
9279          * The size of each ring is fixed in the firmware, but the location is
9280          * configurable.
9281          */
9282         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9283              ((u64) tpr->rx_std_mapping >> 32));
9284         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9285              ((u64) tpr->rx_std_mapping & 0xffffffff));
9286         if (!tg3_flag(tp, 5717_PLUS))
9287                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9288                      NIC_SRAM_RX_BUFFER_DESC);
9289
9290         /* Disable the mini ring */
9291         if (!tg3_flag(tp, 5705_PLUS))
9292                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9293                      BDINFO_FLAGS_DISABLED);
9294
9295         /* Program the jumbo buffer descriptor ring control
9296          * blocks on those devices that have them.
9297          */
9298         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9299             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9300
9301                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9302                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9303                              ((u64) tpr->rx_jmb_mapping >> 32));
9304                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9305                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9306                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9307                               BDINFO_FLAGS_MAXLEN_SHIFT;
9308                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9309                              val | BDINFO_FLAGS_USE_EXT_RECV);
9310                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9311                             tg3_flag(tp, 57765_CLASS))
9312                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9313                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9314                 } else {
9315                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9316                              BDINFO_FLAGS_DISABLED);
9317                 }
9318
9319                 if (tg3_flag(tp, 57765_PLUS)) {
9320                         val = TG3_RX_STD_RING_SIZE(tp);
9321                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9322                         val |= (TG3_RX_STD_DMA_SZ << 2);
9323                 } else
9324                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9325         } else
9326                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9327
9328         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9329
9330         tpr->rx_std_prod_idx = tp->rx_pending;
9331         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9332
9333         tpr->rx_jmb_prod_idx =
9334                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9335         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9336
9337         tg3_rings_reset(tp);
9338
9339         /* Initialize MAC address and backoff seed. */
9340         __tg3_set_mac_addr(tp, 0);
9341
9342         /* MTU + Ethernet header + FCS + optional VLAN tag */
9343         tw32(MAC_RX_MTU_SIZE,
9344              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9345
9346         /* The slot time is changed by tg3_setup_phy if we
9347          * run at gigabit with half duplex.
9348          */
9349         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9350               (6 << TX_LENGTHS_IPG_SHIFT) |
9351               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9352
9353         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9354                 val |= tr32(MAC_TX_LENGTHS) &
9355                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9356                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9357
9358         tw32(MAC_TX_LENGTHS, val);
9359
9360         /* Receive rules. */
9361         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9362         tw32(RCVLPC_CONFIG, 0x0181);
9363
9364         /* Calculate the RDMAC_MODE setting early; we need it to determine
9365          * the RCVLPC_STATE_ENABLE mask.
9366          */
9367         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9368                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9369                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9370                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9371                       RDMAC_MODE_LNGREAD_ENAB);
9372
9373         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9374                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9375
9376         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9377             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9378             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9379                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9380                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9381                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9382
9383         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9384             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9385                 if (tg3_flag(tp, TSO_CAPABLE) &&
9386                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9387                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9388                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9389                            !tg3_flag(tp, IS_5788)) {
9390                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9391                 }
9392         }
9393
9394         if (tg3_flag(tp, PCI_EXPRESS))
9395                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9396
9397         if (tg3_flag(tp, HW_TSO_1) ||
9398             tg3_flag(tp, HW_TSO_2) ||
9399             tg3_flag(tp, HW_TSO_3))
9400                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9401
9402         if (tg3_flag(tp, 57765_PLUS) ||
9403             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9404             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9405                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9406
9407         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9408                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9409
9410         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9411             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9412             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9413             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9414             tg3_flag(tp, 57765_PLUS)) {
9415                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
9416                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
9417                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9418                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9419                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9420                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9421                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9422                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9423                 }
9424                 tw32(TG3_RDMA_RSRVCTRL_REG,
9425                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9426         }
9427
9428         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9429             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9430                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9431                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9432                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9433                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9434         }
9435
9436         /* Receive/send statistics. */
9437         if (tg3_flag(tp, 5750_PLUS)) {
9438                 val = tr32(RCVLPC_STATS_ENABLE);
9439                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9440                 tw32(RCVLPC_STATS_ENABLE, val);
9441         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9442                    tg3_flag(tp, TSO_CAPABLE)) {
9443                 val = tr32(RCVLPC_STATS_ENABLE);
9444                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9445                 tw32(RCVLPC_STATS_ENABLE, val);
9446         } else {
9447                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9448         }
9449         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9450         tw32(SNDDATAI_STATSENAB, 0xffffff);
9451         tw32(SNDDATAI_STATSCTRL,
9452              (SNDDATAI_SCTRL_ENABLE |
9453               SNDDATAI_SCTRL_FASTUPD));
9454
9455         /* Set up the host coalescing engine. */
9456         tw32(HOSTCC_MODE, 0);
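        /* Wait up to ~20 ms for the coalescing engine to stop. */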
9457         for (i = 0; i < 2000; i++) {
9458                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9459                         break;
9460                 udelay(10);
9461         }
9462
9463         __tg3_set_coalesce(tp, &tp->coal);
9464
9465         if (!tg3_flag(tp, 5705_PLUS)) {
9466                 /* Status/statistics block address.  See tg3_timer,
9467                  * the tg3_periodic_fetch_stats call there, and
9468                  * tg3_get_stats to see how this works for 5705/5750 chips.
9469                  */
9470                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9471                      ((u64) tp->stats_mapping >> 32));
9472                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9473                      ((u64) tp->stats_mapping & 0xffffffff));
9474                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9475
9476                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9477
9478                 /* Clear statistics and status block memory areas */
9479                 for (i = NIC_SRAM_STATS_BLK;
9480                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9481                      i += sizeof(u32)) {
9482                         tg3_write_mem(tp, i, 0);
9483                         udelay(40);
9484                 }
9485         }
9486
9487         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9488
9489         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9490         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9491         if (!tg3_flag(tp, 5705_PLUS))
9492                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9493
9494         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9495                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9496                 /* Reset to avoid intermittently losing the first RX packet. */
9497                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9498                 udelay(10);
9499         }
9500
9501         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9502                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9503                         MAC_MODE_FHDE_ENABLE;
9504         if (tg3_flag(tp, ENABLE_APE))
9505                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9506         if (!tg3_flag(tp, 5705_PLUS) &&
9507             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9508             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9509                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9510         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9511         udelay(40);
9512
9513         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9514          * If TG3_FLAG_IS_NIC is zero, we should read the
9515          * register to preserve the GPIO settings for LOMs. The GPIOs,
9516          * whether used as inputs or outputs, are set by boot code after
9517          * reset.
9518          */
9519         if (!tg3_flag(tp, IS_NIC)) {
9520                 u32 gpio_mask;
9521
9522                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9523                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9524                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9525
9526                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9527                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9528                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9529
9530                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9531                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9532
9533                 tp->grc_local_ctrl &= ~gpio_mask;
9534                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9535
9536                 /* GPIO1 must be driven high for EEPROM write protect. */
9537                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9538                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9539                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9540         }
9541         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9542         udelay(100);
9543
9544         if (tg3_flag(tp, USING_MSIX)) {
9545                 val = tr32(MSGINT_MODE);
9546                 val |= MSGINT_MODE_ENABLE;
9547                 if (tp->irq_cnt > 1)
9548                         val |= MSGINT_MODE_MULTIVEC_EN;
9549                 if (!tg3_flag(tp, 1SHOT_MSI))
9550                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9551                 tw32(MSGINT_MODE, val);
9552         }
9553
9554         if (!tg3_flag(tp, 5705_PLUS)) {
9555                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9556                 udelay(40);
9557         }
9558
9559         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9560                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9561                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9562                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9563                WDMAC_MODE_LNGREAD_ENAB);
9564
9565         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9566             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9567                 if (tg3_flag(tp, TSO_CAPABLE) &&
9568                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9569                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9570                         /* nothing */
9571                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9572                            !tg3_flag(tp, IS_5788)) {
9573                         val |= WDMAC_MODE_RX_ACCEL;
9574                 }
9575         }
9576
9577         /* Enable host coalescing bug fix */
9578         if (tg3_flag(tp, 5755_PLUS))
9579                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9580
9581         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9582                 val |= WDMAC_MODE_BURST_ALL_DATA;
9583
9584         tw32_f(WDMAC_MODE, val);
9585         udelay(40);
9586
9587         if (tg3_flag(tp, PCIX_MODE)) {
9588                 u16 pcix_cmd;
9589
9590                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9591                                      &pcix_cmd);
9592                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9593                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9594                         pcix_cmd |= PCI_X_CMD_READ_2K;
9595                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9596                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9597                         pcix_cmd |= PCI_X_CMD_READ_2K;
9598                 }
9599                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9600                                       pcix_cmd);
9601         }
9602
9603         tw32_f(RDMAC_MODE, rdmac_mode);
9604         udelay(40);
9605
9606         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9607                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9608                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9609                                 break;
9610                 }
9611                 if (i < TG3_NUM_RDMA_CHANNELS) {
9612                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9613                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9614                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9615                         tg3_flag_set(tp, 5719_RDMA_BUG);
9616                 }
9617         }
9618
9619         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9620         if (!tg3_flag(tp, 5705_PLUS))
9621                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9622
9623         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9624                 tw32(SNDDATAC_MODE,
9625                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9626         else
9627                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9628
9629         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9630         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9631         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9632         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9633                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9634         tw32(RCVDBDI_MODE, val);
9635         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9636         if (tg3_flag(tp, HW_TSO_1) ||
9637             tg3_flag(tp, HW_TSO_2) ||
9638             tg3_flag(tp, HW_TSO_3))
9639                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9640         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9641         if (tg3_flag(tp, ENABLE_TSS))
9642                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9643         tw32(SNDBDI_MODE, val);
9644         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9645
9646         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9647                 err = tg3_load_5701_a0_firmware_fix(tp);
9648                 if (err)
9649                         return err;
9650         }
9651
9652         if (tg3_flag(tp, TSO_CAPABLE)) {
9653                 err = tg3_load_tso_firmware(tp);
9654                 if (err)
9655                         return err;
9656         }
9657
9658         tp->tx_mode = TX_MODE_ENABLE;
9659
9660         if (tg3_flag(tp, 5755_PLUS) ||
9661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9662                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9663
9664         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9665                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9666                 tp->tx_mode &= ~val;
9667                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9668         }
9669
9670         tw32_f(MAC_TX_MODE, tp->tx_mode);
9671         udelay(100);
9672
9673         if (tg3_flag(tp, ENABLE_RSS)) {
9674                 tg3_rss_write_indir_tbl(tp);
9675
9676                 /* Set up the "secret" RSS hash key. */
9677                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9678                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9679                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9680                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9681                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9682                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9683                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9684                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9685                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9686                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9687         }
9688
9689         tp->rx_mode = RX_MODE_ENABLE;
9690         if (tg3_flag(tp, 5755_PLUS))
9691                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9692
9693         if (tg3_flag(tp, ENABLE_RSS))
9694                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9695                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9696                                RX_MODE_RSS_IPV6_HASH_EN |
9697                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9698                                RX_MODE_RSS_IPV4_HASH_EN |
9699                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9700
9701         tw32_f(MAC_RX_MODE, tp->rx_mode);
9702         udelay(10);
9703
9704         tw32(MAC_LED_CTRL, tp->led_ctrl);
9705
9706         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9707         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9708                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9709                 udelay(10);
9710         }
9711         tw32_f(MAC_RX_MODE, tp->rx_mode);
9712         udelay(10);
9713
9714         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9715                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9716                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9717                         /* Set the drive transmission level to 1.2V, but only
9718                          * if the signal pre-emphasis bit is not set. */
9719                         val = tr32(MAC_SERDES_CFG);
9720                         val &= 0xfffff000;
9721                         val |= 0x880;
9722                         tw32(MAC_SERDES_CFG, val);
9723                 }
9724                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9725                         tw32(MAC_SERDES_CFG, 0x616000);
9726         }
9727
9728         /* Prevent chip from dropping frames when flow control
9729          * is enabled.
9730          */
9731         if (tg3_flag(tp, 57765_CLASS))
9732                 val = 1;
9733         else
9734                 val = 2;
9735         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9736
9737         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9738             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9739                 /* Use hardware link auto-negotiation */
9740                 tg3_flag_set(tp, HW_AUTONEG);
9741         }
9742
9743         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9744             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9745                 u32 tmp;
9746
9747                 tmp = tr32(SERDES_RX_CTRL);
9748                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9749                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9750                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9751                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9752         }
9753
9754         if (!tg3_flag(tp, USE_PHYLIB)) {
9755                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9756                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9757
9758                 err = tg3_setup_phy(tp, 0);
9759                 if (err)
9760                         return err;
9761
9762                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9763                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9764                         u32 tmp;
9765
9766                         /* Clear CRC stats. */
9767                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9768                                 tg3_writephy(tp, MII_TG3_TEST1,
9769                                              tmp | MII_TG3_TEST1_CRC_EN);
9770                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9771                         }
9772                 }
9773         }
9774
9775         __tg3_set_rx_mode(tp->dev);
9776
9777         /* Initialize receive rules. */
9778         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9779         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9780         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9781         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9782
9783         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9784                 limit = 8;
9785         else
9786                 limit = 16;
9787         if (tg3_flag(tp, ENABLE_ASF))
9788                 limit -= 4;
9789         switch (limit) {
9790         case 16:
9791                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9792         case 15:
9793                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9794         case 14:
9795                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9796         case 13:
9797                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9798         case 12:
9799                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9800         case 11:
9801                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9802         case 10:
9803                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9804         case 9:
9805                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9806         case 8:
9807                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9808         case 7:
9809                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9810         case 6:
9811                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9812         case 5:
9813                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9814         case 4:
9815                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9816         case 3:
9817                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9818         case 2:
9819         case 1:
9820
9821         default:
9822                 break;
9823         }
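        /* The switch above relies on intentional fall-through: starting at
         * "limit", each case clears one more unused rule/value register pair
         * on the way down.  Rules 0 and 1 are programmed just before the
         * switch, and the commented-out cases suggest rules 2 and 3 are
         * reserved, so cases 4 and below deliberately clear nothing.
         */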
9824
9825         if (tg3_flag(tp, ENABLE_APE))
9826                 /* Write our heartbeat update interval to APE. */
9827                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9828                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9829
9830         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9831
9832         return 0;
9833 }
9834
9835 /* Called at device open time to get the chip ready for
9836  * packet processing.  Invoked with tp->lock held.
9837  */
9838 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9839 {
9840         tg3_switch_clocks(tp);
9841
9842         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9843
9844         return tg3_reset_hw(tp, reset_phy);
9845 }
9846
9847 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9848 {
9849         int i;
9850
9851         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9852                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9853
9854                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9855                 off += len;
9856
9857                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9858                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9859                         memset(ocir, 0, TG3_OCIR_LEN);
9860         }
9861 }
9862
9863 /* sysfs attributes for hwmon */
9864 static ssize_t tg3_show_temp(struct device *dev,
9865                              struct device_attribute *devattr, char *buf)
9866 {
9867         struct pci_dev *pdev = to_pci_dev(dev);
9868         struct net_device *netdev = pci_get_drvdata(pdev);
9869         struct tg3 *tp = netdev_priv(netdev);
9870         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9871         u32 temperature;
9872
9873         spin_lock_bh(&tp->lock);
9874         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9875                                 sizeof(temperature));
9876         spin_unlock_bh(&tp->lock);
9877         return sprintf(buf, "%u\n", temperature);
9878 }
9879
9880
9881 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9882                           TG3_TEMP_SENSOR_OFFSET);
9883 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9884                           TG3_TEMP_CAUTION_OFFSET);
9885 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9886                           TG3_TEMP_MAX_OFFSET);
9887
9888 static struct attribute *tg3_attributes[] = {
9889         &sensor_dev_attr_temp1_input.dev_attr.attr,
9890         &sensor_dev_attr_temp1_crit.dev_attr.attr,
9891         &sensor_dev_attr_temp1_max.dev_attr.attr,
9892         NULL
9893 };
9894
9895 static const struct attribute_group tg3_group = {
9896         .attrs = tg3_attributes,
9897 };
9898
9899 static void tg3_hwmon_close(struct tg3 *tp)
9900 {
9901         if (tp->hwmon_dev) {
9902                 hwmon_device_unregister(tp->hwmon_dev);
9903                 tp->hwmon_dev = NULL;
9904                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9905         }
9906 }
9907
9908 static void tg3_hwmon_open(struct tg3 *tp)
9909 {
9910         int i, err;
9911         u32 size = 0;
9912         struct pci_dev *pdev = tp->pdev;
9913         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9914
9915         tg3_sd_scan_scratchpad(tp, ocirs);
9916
9917         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9918                 if (!ocirs[i].src_data_length)
9919                         continue;
9920
9921                 size += ocirs[i].src_hdr_length;
9922                 size += ocirs[i].src_data_length;
9923         }
9924
9925         if (!size)
9926                 return;
9927
9928         /* Register hwmon sysfs hooks */
9929         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9930         if (err) {
9931                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9932                 return;
9933         }
9934
9935         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9936         if (IS_ERR(tp->hwmon_dev)) {
9937                 tp->hwmon_dev = NULL;
9938                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9939                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9940         }
9941 }
9942
9943
9944 #define TG3_STAT_ADD32(PSTAT, REG) \
9945 do {    u32 __val = tr32(REG); \
9946         (PSTAT)->low += __val; \
9947         if ((PSTAT)->low < __val) \
9948                 (PSTAT)->high += 1; \
9949 } while (0)
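/* TG3_STAT_ADD32 folds a 32-bit hardware counter into a 64-bit software
 * accumulator using the classic unsigned-overflow test: after the add,
 * (PSTAT)->low < __val is true iff the low word wrapped, e.g.
 *
 *      low = 0xfffffff0, __val = 0x20  =>  low = 0x10 < 0x20, so high++
 *
 * This stays accurate only if each read returns the delta since the last
 * read, i.e. the statistics registers are presumably clear-on-read given
 * the once-per-second polling below.
 */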
9950
9951 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9952 {
9953         struct tg3_hw_stats *sp = tp->hw_stats;
9954
9955         if (!tp->link_up)
9956                 return;
9957
9958         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9959         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9960         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9961         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9962         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9963         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9964         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9965         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9966         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9967         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9968         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9969         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9970         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9971         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
9972                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9973                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9974                 u32 val;
9975
9976                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9977                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
9978                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9979                 tg3_flag_clear(tp, 5719_RDMA_BUG);
9980         }
9981
9982         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9983         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9984         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9985         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9986         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9987         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9988         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9989         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9990         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9991         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9992         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9993         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9994         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9995         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9996
9997         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9998         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9999             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
10000             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
10001                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10002         } else {
10003                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10004                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10005                 if (val) {
10006                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10007                         sp->rx_discards.low += val;
10008                         if (sp->rx_discards.low < val)
10009                                 sp->rx_discards.high += 1;
10010                 }
10011                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10012         }
10013         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10014 }
10015
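/* Work around MSIs that occasionally go missing on 5717 and 57765-class
 * chips (see the caller in tg3_timer below).  If a vector reports pending
 * work but its rx/tx consumer indices have not moved since the previous
 * timer tick, invoke the handler by hand via tg3_msi() after one tick of
 * grace.
 */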
10016 static void tg3_chk_missed_msi(struct tg3 *tp)
10017 {
10018         u32 i;
10019
10020         for (i = 0; i < tp->irq_cnt; i++) {
10021                 struct tg3_napi *tnapi = &tp->napi[i];
10022
10023                 if (tg3_has_work(tnapi)) {
10024                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10025                             tnapi->last_tx_cons == tnapi->tx_cons) {
10026                                 if (tnapi->chk_msi_cnt < 1) {
10027                                         tnapi->chk_msi_cnt++;
10028                                         return;
10029                                 }
10030                                 tg3_msi(0, tnapi);
10031                         }
10032                 }
10033                 tnapi->chk_msi_cnt = 0;
10034                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10035                 tnapi->last_tx_cons = tnapi->tx_cons;
10036         }
10037 }
10038
10039 static void tg3_timer(unsigned long __opaque)
10040 {
10041         struct tg3 *tp = (struct tg3 *) __opaque;
10042
10043         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10044                 goto restart_timer;
10045
10046         spin_lock(&tp->lock);
10047
10048         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10049             tg3_flag(tp, 57765_CLASS))
10050                 tg3_chk_missed_msi(tp);
10051
10052         if (!tg3_flag(tp, TAGGED_STATUS)) {
10053                 /* All of this garbage is needed because, when using
10054                  * non-tagged IRQ status, the mailbox/status_block protocol
10055                  * the chip uses to talk to the CPU is race prone.
10056                  */
10057                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10058                         tw32(GRC_LOCAL_CTRL,
10059                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10060                 } else {
10061                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10062                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10063                 }
10064
10065                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10066                         spin_unlock(&tp->lock);
10067                         tg3_reset_task_schedule(tp);
10068                         goto restart_timer;
10069                 }
10070         }
10071
10072         /* This part only runs once per second. */
10073         if (!--tp->timer_counter) {
10074                 if (tg3_flag(tp, 5705_PLUS))
10075                         tg3_periodic_fetch_stats(tp);
10076
10077                 if (tp->setlpicnt && !--tp->setlpicnt)
10078                         tg3_phy_eee_enable(tp);
10079
10080                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10081                         u32 mac_stat;
10082                         int phy_event;
10083
10084                         mac_stat = tr32(MAC_STATUS);
10085
10086                         phy_event = 0;
10087                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10088                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10089                                         phy_event = 1;
10090                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10091                                 phy_event = 1;
10092
10093                         if (phy_event)
10094                                 tg3_setup_phy(tp, 0);
10095                 } else if (tg3_flag(tp, POLL_SERDES)) {
10096                         u32 mac_stat = tr32(MAC_STATUS);
10097                         int need_setup = 0;
10098
10099                         if (tp->link_up &&
10100                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10101                                 need_setup = 1;
10102                         }
10103                         if (!tp->link_up &&
10104                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10105                                          MAC_STATUS_SIGNAL_DET))) {
10106                                 need_setup = 1;
10107                         }
10108                         if (need_setup) {
10109                                 if (!tp->serdes_counter) {
10110                                         tw32_f(MAC_MODE,
10111                                              (tp->mac_mode &
10112                                               ~MAC_MODE_PORT_MODE_MASK));
10113                                         udelay(40);
10114                                         tw32_f(MAC_MODE, tp->mac_mode);
10115                                         udelay(40);
10116                                 }
10117                                 tg3_setup_phy(tp, 0);
10118                         }
10119                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10120                            tg3_flag(tp, 5780_CLASS)) {
10121                         tg3_serdes_parallel_detect(tp);
10122                 }
10123
10124                 tp->timer_counter = tp->timer_multiplier;
10125         }
10126
10127         /* Heartbeat is only sent once every 2 seconds.
10128          *
10129          * The heartbeat is to tell the ASF firmware that the host
10130          * driver is still alive.  In the event that the OS crashes,
10131          * ASF needs to reset the hardware to free up the FIFO space
10132          * that may be filled with rx packets destined for the host.
10133          * If the FIFO is full, ASF will no longer function properly.
10134          *
10135          * Unintended resets have been reported on real time kernels
10136          * where the timer doesn't run on time.  Netpoll will also have
10137          * the same problem.
10138          *
10139          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10140          * to check the ring condition when the heartbeat is expiring
10141          * before doing the reset.  This will prevent most unintended
10142          * resets.
10143          */
10144         if (!--tp->asf_counter) {
10145                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10146                         tg3_wait_for_event_ack(tp);
10147
10148                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10149                                       FWCMD_NICDRV_ALIVE3);
10150                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10151                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10152                                       TG3_FW_UPDATE_TIMEOUT_SEC);
10153
10154                         tg3_generate_fw_event(tp);
10155                 }
10156                 tp->asf_counter = tp->asf_multiplier;
10157         }
10158
10159         spin_unlock(&tp->lock);
10160
10161 restart_timer:
10162         tp->timer.expires = jiffies + tp->timer_offset;
10163         add_timer(&tp->timer);
10164 }
10165
10166 static void tg3_timer_init(struct tg3 *tp)
10167 {
10168         if (tg3_flag(tp, TAGGED_STATUS) &&
10169             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
10170             !tg3_flag(tp, 57765_CLASS))
10171                 tp->timer_offset = HZ;
10172         else
10173                 tp->timer_offset = HZ / 10;
10174
10175         BUG_ON(tp->timer_offset > HZ);
10176
10177         tp->timer_multiplier = (HZ / tp->timer_offset);
10178         tp->asf_multiplier = (HZ / tp->timer_offset) *
10179                              TG3_FW_UPDATE_FREQ_SEC;
10180
10181         init_timer(&tp->timer);
10182         tp->timer.data = (unsigned long) tp;
10183         tp->timer.function = tg3_timer;
10184 }
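/* With the values chosen above, timer_multiplier == HZ / timer_offset is
 * the number of timer ticks per second, so the "!--tp->timer_counter"
 * block in tg3_timer runs once per second whether the tick is HZ or
 * HZ / 10.  Likewise asf_counter expires every TG3_FW_UPDATE_FREQ_SEC
 * seconds (presumably two, per the heartbeat comment in tg3_timer).
 */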
10185
10186 static void tg3_timer_start(struct tg3 *tp)
10187 {
10188         tp->asf_counter   = tp->asf_multiplier;
10189         tp->timer_counter = tp->timer_multiplier;
10190
10191         tp->timer.expires = jiffies + tp->timer_offset;
10192         add_timer(&tp->timer);
10193 }
10194
10195 static void tg3_timer_stop(struct tg3 *tp)
10196 {
10197         del_timer_sync(&tp->timer);
10198 }
10199
10200 /* Restart hardware after configuration changes, self-test, etc.
10201  * Invoked with tp->lock held.
10202  */
10203 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10204         __releases(tp->lock)
10205         __acquires(tp->lock)
10206 {
10207         int err;
10208
10209         err = tg3_init_hw(tp, reset_phy);
10210         if (err) {
10211                 netdev_err(tp->dev,
10212                            "Failed to re-initialize device, aborting\n");
10213                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10214                 tg3_full_unlock(tp);
10215                 tg3_timer_stop(tp);
10216                 tp->irq_sync = 0;
10217                 tg3_napi_enable(tp);
10218                 dev_close(tp->dev);
10219                 tg3_full_lock(tp, 0);
10220         }
10221         return err;
10222 }
10223
10224 static void tg3_reset_task(struct work_struct *work)
10225 {
10226         struct tg3 *tp = container_of(work, struct tg3, reset_task);
10227         int err;
10228
10229         tg3_full_lock(tp, 0);
10230
10231         if (!netif_running(tp->dev)) {
10232                 tg3_flag_clear(tp, RESET_TASK_PENDING);
10233                 tg3_full_unlock(tp);
10234                 return;
10235         }
10236
10237         tg3_full_unlock(tp);
10238
10239         tg3_phy_stop(tp);
10240
10241         tg3_netif_stop(tp);
10242
10243         tg3_full_lock(tp, 1);
10244
10245         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10246                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10247                 tp->write32_rx_mbox = tg3_write_flush_reg32;
10248                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10249                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10250         }
10251
10252         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10253         err = tg3_init_hw(tp, 1);
10254         if (err)
10255                 goto out;
10256
10257         tg3_netif_start(tp);
10258
10259 out:
10260         tg3_full_unlock(tp);
10261
10262         if (!err)
10263                 tg3_phy_start(tp);
10264
10265         tg3_flag_clear(tp, RESET_TASK_PENDING);
10266 }
10267
10268 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10269 {
10270         irq_handler_t fn;
10271         unsigned long flags;
10272         char *name;
10273         struct tg3_napi *tnapi = &tp->napi[irq_num];
10274
10275         if (tp->irq_cnt == 1)
10276                 name = tp->dev->name;
10277         else {
10278                 name = &tnapi->irq_lbl[0];
10279                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10280                 name[IFNAMSIZ-1] = 0;
10281         }
10282
10283         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10284                 fn = tg3_msi;
10285                 if (tg3_flag(tp, 1SHOT_MSI))
10286                         fn = tg3_msi_1shot;
10287                 flags = 0;
10288         } else {
10289                 fn = tg3_interrupt;
10290                 if (tg3_flag(tp, TAGGED_STATUS))
10291                         fn = tg3_interrupt_tagged;
10292                 flags = IRQF_SHARED;
10293         }
10294
10295         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10296 }
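/* Handler selection above: MSI/MSI-X vectors are exclusive to the device,
 * so tg3_msi (or tg3_msi_1shot when one-shot mode is usable) is installed
 * without IRQF_SHARED; a legacy INTx line may be shared with other
 * devices, so tg3_interrupt / tg3_interrupt_tagged is registered with
 * IRQF_SHARED instead.
 */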
10297
10298 static int tg3_test_interrupt(struct tg3 *tp)
10299 {
10300         struct tg3_napi *tnapi = &tp->napi[0];
10301         struct net_device *dev = tp->dev;
10302         int err, i, intr_ok = 0;
10303         u32 val;
10304
10305         if (!netif_running(dev))
10306                 return -ENODEV;
10307
10308         tg3_disable_ints(tp);
10309
10310         free_irq(tnapi->irq_vec, tnapi);
10311
10312         /*
10313          * Turn off MSI one-shot mode.  Otherwise this test has no
10314          * way to observe whether the interrupt was delivered.
10315          */
10316         if (tg3_flag(tp, 57765_PLUS)) {
10317                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10318                 tw32(MSGINT_MODE, val);
10319         }
10320
10321         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10322                           IRQF_SHARED, dev->name, tnapi);
10323         if (err)
10324                 return err;
10325
10326         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10327         tg3_enable_ints(tp);
10328
10329         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10330                tnapi->coal_now);
10331
10332         for (i = 0; i < 5; i++) {
10333                 u32 int_mbox, misc_host_ctrl;
10334
10335                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10336                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10337
10338                 if ((int_mbox != 0) ||
10339                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10340                         intr_ok = 1;
10341                         break;
10342                 }
10343
10344                 if (tg3_flag(tp, 57765_PLUS) &&
10345                     tnapi->hw_status->status_tag != tnapi->last_tag)
10346                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10347
10348                 msleep(10);
10349         }
10350
10351         tg3_disable_ints(tp);
10352
10353         free_irq(tnapi->irq_vec, tnapi);
10354
10355         err = tg3_request_irq(tp, 0);
10356
10357         if (err)
10358                 return err;
10359
10360         if (intr_ok) {
10361                 /* Reenable MSI one shot mode. */
10362                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10363                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10364                         tw32(MSGINT_MODE, val);
10365                 }
10366                 return 0;
10367         }
10368
10369         return -EIO;
10370 }
10371
10372 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
10373  * INTx mode is successfully restored.
10374  */
10375 static int tg3_test_msi(struct tg3 *tp)
10376 {
10377         int err;
10378         u16 pci_cmd;
10379
10380         if (!tg3_flag(tp, USING_MSI))
10381                 return 0;
10382
10383         /* Turn off SERR reporting in case MSI terminates with Master
10384          * Abort.
10385          */
10386         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10387         pci_write_config_word(tp->pdev, PCI_COMMAND,
10388                               pci_cmd & ~PCI_COMMAND_SERR);
10389
10390         err = tg3_test_interrupt(tp);
10391
10392         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10393
10394         if (!err)
10395                 return 0;
10396
10397         /* other failures */
10398         if (err != -EIO)
10399                 return err;
10400
10401         /* MSI test failed, go back to INTx mode */
10402         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10403                     "to INTx mode. Please report this failure to the PCI "
10404                     "maintainer and include system chipset information\n");
10405
10406         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10407
10408         pci_disable_msi(tp->pdev);
10409
10410         tg3_flag_clear(tp, USING_MSI);
10411         tp->napi[0].irq_vec = tp->pdev->irq;
10412
10413         err = tg3_request_irq(tp, 0);
10414         if (err)
10415                 return err;
10416
10417         /* Need to reset the chip because the MSI cycle may have terminated
10418          * with Master Abort.
10419          */
10420         tg3_full_lock(tp, 1);
10421
10422         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10423         err = tg3_init_hw(tp, 1);
10424
10425         tg3_full_unlock(tp);
10426
10427         if (err)
10428                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10429
10430         return err;
10431 }
10432
10433 static int tg3_request_firmware(struct tg3 *tp)
10434 {
10435         const __be32 *fw_data;
10436
10437         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10438                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10439                            tp->fw_needed);
10440                 return -ENOENT;
10441         }
10442
10443         fw_data = (void *)tp->fw->data;
10444
10445         /* Firmware blob starts with version numbers, followed by
10446          * start address and _full_ length including BSS sections
10447          * (which must be longer than the actual data, of course).
10448          */
10449
10450         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10451         if (tp->fw_len < (tp->fw->size - 12)) {
10452                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10453                            tp->fw_len, tp->fw_needed);
10454                 release_firmware(tp->fw);
10455                 tp->fw = NULL;
10456                 return -EINVAL;
10457         }
10458
10459         /* We no longer need firmware; we have it. */
10460         tp->fw_needed = NULL;
10461         return 0;
10462 }
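/* Assumed firmware blob layout, per the comment and checks above (three
 * big-endian header words, hence the "- 12"):
 *
 *      fw_data[0]   version
 *      fw_data[1]   load/start address
 *      fw_data[2]   full image length including BSS (becomes tp->fw_len)
 *      fw_data[3..] text/data payload
 */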
10463
10464 static u32 tg3_irq_count(struct tg3 *tp)
10465 {
10466         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10467
10468         if (irq_cnt > 1) {
10469                 /* We want as many rx rings enabled as there are cpus.
10470                  * In multiqueue MSI-X mode, the first MSI-X vector
10471                  * only deals with link interrupts, etc, so we add
10472                  * one to the number of vectors we are requesting.
10473                  */
10474                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10475         }
10476
10477         return irq_cnt;
10478 }
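/* Example: with rxq_cnt = 4 and txq_cnt = 1, irq_cnt starts at 4 and is
 * bumped to min(5, tp->irq_max): vector 0 handles link and other
 * miscellaneous interrupts, vectors 1-4 service the four rx rings.
 */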
10479
10480 static bool tg3_enable_msix(struct tg3 *tp)
10481 {
10482         int i, rc;
10483         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10484
10485         tp->txq_cnt = tp->txq_req;
10486         tp->rxq_cnt = tp->rxq_req;
10487         if (!tp->rxq_cnt)
10488                 tp->rxq_cnt = netif_get_num_default_rss_queues();
10489         if (tp->rxq_cnt > tp->rxq_max)
10490                 tp->rxq_cnt = tp->rxq_max;
10491
10492         /* Disable multiple TX rings by default.  Simple round-robin hardware
10493          * scheduling of the TX rings can cause starvation of rings with
10494          * small packets when other rings have TSO or jumbo packets.
10495          */
10496         if (!tp->txq_req)
10497                 tp->txq_cnt = 1;
10498
10499         tp->irq_cnt = tg3_irq_count(tp);
10500
10501         for (i = 0; i < tp->irq_max; i++) {
10502                 msix_ent[i].entry  = i;
10503                 msix_ent[i].vector = 0;
10504         }
10505
10506         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10507         if (rc < 0) {
10508                 return false;
10509         } else if (rc != 0) {
10510                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10511                         return false;
10512                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10513                               tp->irq_cnt, rc);
10514                 tp->irq_cnt = rc;
10515                 tp->rxq_cnt = max(rc - 1, 1);
10516                 if (tp->txq_cnt)
10517                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10518         }
10519
10520         for (i = 0; i < tp->irq_max; i++)
10521                 tp->napi[i].irq_vec = msix_ent[i].vector;
10522
10523         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10524                 pci_disable_msix(tp->pdev);
10525                 return false;
10526         }
10527
10528         if (tp->irq_cnt == 1)
10529                 return true;
10530
10531         tg3_flag_set(tp, ENABLE_RSS);
10532
10533         if (tp->txq_cnt > 1)
10534                 tg3_flag_set(tp, ENABLE_TSS);
10535
10536         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10537
10538         return true;
10539 }
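/* Under the pci_enable_msix() contract of this era, 0 means success, a
 * negative value means MSI-X cannot be enabled, and a positive value is
 * the number of vectors the platform could actually provide -- hence the
 * retry above with rc vectors and the rx/tx queue counts scaled down to
 * match.
 */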
10540
10541 static void tg3_ints_init(struct tg3 *tp)
10542 {
10543         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10544             !tg3_flag(tp, TAGGED_STATUS)) {
10545                 /* All MSI-supporting chips should support tagged
10546                  * status.  Assert that this is the case.
10547                  */
10548                 netdev_warn(tp->dev,
10549                             "MSI without TAGGED_STATUS? Not using MSI\n");
10550                 goto defcfg;
10551         }
10552
10553         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10554                 tg3_flag_set(tp, USING_MSIX);
10555         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10556                 tg3_flag_set(tp, USING_MSI);
10557
10558         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10559                 u32 msi_mode = tr32(MSGINT_MODE);
10560                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10561                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10562                 if (!tg3_flag(tp, 1SHOT_MSI))
10563                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10564                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10565         }
10566 defcfg:
10567         if (!tg3_flag(tp, USING_MSIX)) {
10568                 tp->irq_cnt = 1;
10569                 tp->napi[0].irq_vec = tp->pdev->irq;
10570         }
10571
10572         if (tp->irq_cnt == 1) {
10573                 tp->txq_cnt = 1;
10574                 tp->rxq_cnt = 1;
10575                 netif_set_real_num_tx_queues(tp->dev, 1);
10576                 netif_set_real_num_rx_queues(tp->dev, 1);
10577         }
10578 }
10579
10580 static void tg3_ints_fini(struct tg3 *tp)
10581 {
10582         if (tg3_flag(tp, USING_MSIX))
10583                 pci_disable_msix(tp->pdev);
10584         else if (tg3_flag(tp, USING_MSI))
10585                 pci_disable_msi(tp->pdev);
10586         tg3_flag_clear(tp, USING_MSI);
10587         tg3_flag_clear(tp, USING_MSIX);
10588         tg3_flag_clear(tp, ENABLE_RSS);
10589         tg3_flag_clear(tp, ENABLE_TSS);
10590 }
10591
10592 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10593                      bool init)
10594 {
10595         struct net_device *dev = tp->dev;
10596         int i, err;
10597
10598         /*
10599          * Set up interrupts first so we know how
10600          * many NAPI resources to allocate.
10601          */
10602         tg3_ints_init(tp);
10603
10604         tg3_rss_check_indir_tbl(tp);
10605
10606         /* The placement of this call is tied
10607          * to the setup and use of Host TX descriptors.
10608          */
10609         err = tg3_alloc_consistent(tp);
10610         if (err)
10611                 goto err_out1;
10612
10613         tg3_napi_init(tp);
10614
10615         tg3_napi_enable(tp);
10616
10617         for (i = 0; i < tp->irq_cnt; i++) {
10618                 struct tg3_napi *tnapi = &tp->napi[i];
10619                 err = tg3_request_irq(tp, i);
10620                 if (err) {
10621                         for (i--; i >= 0; i--) {
10622                                 tnapi = &tp->napi[i];
10623                                 free_irq(tnapi->irq_vec, tnapi);
10624                         }
10625                         goto err_out2;
10626                 }
10627         }
10628
10629         tg3_full_lock(tp, 0);
10630
10631         err = tg3_init_hw(tp, reset_phy);
10632         if (err) {
10633                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10634                 tg3_free_rings(tp);
10635         }
10636
10637         tg3_full_unlock(tp);
10638
10639         if (err)
10640                 goto err_out3;
10641
10642         if (test_irq && tg3_flag(tp, USING_MSI)) {
10643                 err = tg3_test_msi(tp);
10644
10645                 if (err) {
10646                         tg3_full_lock(tp, 0);
10647                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10648                         tg3_free_rings(tp);
10649                         tg3_full_unlock(tp);
10650
10651                         goto err_out2;
10652                 }
10653
10654                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10655                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10656
10657                         tw32(PCIE_TRANSACTION_CFG,
10658                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10659                 }
10660         }
10661
10662         tg3_phy_start(tp);
10663
10664         tg3_hwmon_open(tp);
10665
10666         tg3_full_lock(tp, 0);
10667
10668         tg3_timer_start(tp);
10669         tg3_flag_set(tp, INIT_COMPLETE);
10670         tg3_enable_ints(tp);
10671
10672         if (init)
10673                 tg3_ptp_init(tp);
10674         else
10675                 tg3_ptp_resume(tp);
10676
10677
10678         tg3_full_unlock(tp);
10679
10680         netif_tx_start_all_queues(dev);
10681
10682         /*
10683          * Reset the loopback feature if it was turned on while the device
10684          * was down, to make sure that it's installed properly now.
10685          */
10686         if (dev->features & NETIF_F_LOOPBACK)
10687                 tg3_set_loopback(dev, dev->features);
10688
10689         return 0;
10690
10691 err_out3:
10692         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10693                 struct tg3_napi *tnapi = &tp->napi[i];
10694                 free_irq(tnapi->irq_vec, tnapi);
10695         }
10696
10697 err_out2:
10698         tg3_napi_disable(tp);
10699         tg3_napi_fini(tp);
10700         tg3_free_consistent(tp);
10701
10702 err_out1:
10703         tg3_ints_fini(tp);
10704
10705         return err;
10706 }
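/* The error labels above unwind in reverse order of setup: err_out3 frees
 * the IRQs requested in the loop, err_out2 tears down NAPI and the
 * consistent DMA memory, and err_out1 undoes tg3_ints_init().  tg3_stop()
 * below performs the same teardown for the fully-started case.
 */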
10707
10708 static void tg3_stop(struct tg3 *tp)
10709 {
10710         int i;
10711
10712         tg3_reset_task_cancel(tp);
10713         tg3_netif_stop(tp);
10714
10715         tg3_timer_stop(tp);
10716
10717         tg3_hwmon_close(tp);
10718
10719         tg3_phy_stop(tp);
10720
10721         tg3_full_lock(tp, 1);
10722
10723         tg3_disable_ints(tp);
10724
10725         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10726         tg3_free_rings(tp);
10727         tg3_flag_clear(tp, INIT_COMPLETE);
10728
10729         tg3_full_unlock(tp);
10730
10731         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10732                 struct tg3_napi *tnapi = &tp->napi[i];
10733                 free_irq(tnapi->irq_vec, tnapi);
10734         }
10735
10736         tg3_ints_fini(tp);
10737
10738         tg3_napi_fini(tp);
10739
10740         tg3_free_consistent(tp);
10741 }
10742
10743 static int tg3_open(struct net_device *dev)
10744 {
10745         struct tg3 *tp = netdev_priv(dev);
10746         int err;
10747
10748         if (tp->fw_needed) {
10749                 err = tg3_request_firmware(tp);
10750                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10751                         if (err)
10752                                 return err;
10753                 } else if (err) {
10754                         netdev_warn(tp->dev, "TSO capability disabled\n");
10755                         tg3_flag_clear(tp, TSO_CAPABLE);
10756                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10757                         netdev_notice(tp->dev, "TSO capability restored\n");
10758                         tg3_flag_set(tp, TSO_CAPABLE);
10759                 }
10760         }
10761
10762         tg3_carrier_off(tp);
10763
10764         err = tg3_power_up(tp);
10765         if (err)
10766                 return err;
10767
10768         tg3_full_lock(tp, 0);
10769
10770         tg3_disable_ints(tp);
10771         tg3_flag_clear(tp, INIT_COMPLETE);
10772
10773         tg3_full_unlock(tp);
10774
10775         err = tg3_start(tp, true, true, true);
10776         if (err) {
10777                 tg3_frob_aux_power(tp, false);
10778                 pci_set_power_state(tp->pdev, PCI_D3hot);
10779         }
10780
10781         if (tg3_flag(tp, PTP_CAPABLE)) {
10782                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
10783                                                    &tp->pdev->dev);
10784                 if (IS_ERR(tp->ptp_clock))
10785                         tp->ptp_clock = NULL;
10786         }
10787
10788         return err;
10789 }
10790
10791 static int tg3_close(struct net_device *dev)
10792 {
10793         struct tg3 *tp = netdev_priv(dev);
10794
10795         tg3_ptp_fini(tp);
10796
10797         tg3_stop(tp);
10798
10799         /* Clear stats across close / open calls */
10800         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10801         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10802
10803         tg3_power_down(tp);
10804
10805         tg3_carrier_off(tp);
10806
10807         return 0;
10808 }
10809
10810 static inline u64 get_stat64(tg3_stat64_t *val)
10811 {
10812         return ((u64)val->high << 32) | ((u64)val->low);
10813 }
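/* get_stat64() reassembles the 64-bit totals that the hardware statistics
 * block and TG3_STAT_ADD32 maintain as separate high/low 32-bit halves.
 */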
10814
10815 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10816 {
10817         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10818
10819         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10820             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10821              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10822                 u32 val;
10823
10824                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10825                         tg3_writephy(tp, MII_TG3_TEST1,
10826                                      val | MII_TG3_TEST1_CRC_EN);
10827                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10828                 } else
10829                         val = 0;
10830
10831                 tp->phy_crc_errors += val;
10832
10833                 return tp->phy_crc_errors;
10834         }
10835
10836         return get_stat64(&hw_stats->rx_fcs_errors);
10837 }
10838
10839 #define ESTAT_ADD(member) \
10840         estats->member =        old_estats->member + \
10841                                 get_stat64(&hw_stats->member)
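/* ESTAT_ADD reports old_estats->member + the current hardware counter:
 * tp->estats_prev apparently holds totals banked before the last chip
 * reset (it is cleared across close/open in tg3_close above), so the
 * values handed to ethtool keep increasing even though the hardware
 * statistics block restarts from zero.
 */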
10842
10843 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10844 {
10845         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10846         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10847
10848         ESTAT_ADD(rx_octets);
10849         ESTAT_ADD(rx_fragments);
10850         ESTAT_ADD(rx_ucast_packets);
10851         ESTAT_ADD(rx_mcast_packets);
10852         ESTAT_ADD(rx_bcast_packets);
10853         ESTAT_ADD(rx_fcs_errors);
10854         ESTAT_ADD(rx_align_errors);
10855         ESTAT_ADD(rx_xon_pause_rcvd);
10856         ESTAT_ADD(rx_xoff_pause_rcvd);
10857         ESTAT_ADD(rx_mac_ctrl_rcvd);
10858         ESTAT_ADD(rx_xoff_entered);
10859         ESTAT_ADD(rx_frame_too_long_errors);
10860         ESTAT_ADD(rx_jabbers);
10861         ESTAT_ADD(rx_undersize_packets);
10862         ESTAT_ADD(rx_in_length_errors);
10863         ESTAT_ADD(rx_out_length_errors);
10864         ESTAT_ADD(rx_64_or_less_octet_packets);
10865         ESTAT_ADD(rx_65_to_127_octet_packets);
10866         ESTAT_ADD(rx_128_to_255_octet_packets);
10867         ESTAT_ADD(rx_256_to_511_octet_packets);
10868         ESTAT_ADD(rx_512_to_1023_octet_packets);
10869         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10870         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10871         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10872         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10873         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10874
10875         ESTAT_ADD(tx_octets);
10876         ESTAT_ADD(tx_collisions);
10877         ESTAT_ADD(tx_xon_sent);
10878         ESTAT_ADD(tx_xoff_sent);
10879         ESTAT_ADD(tx_flow_control);
10880         ESTAT_ADD(tx_mac_errors);
10881         ESTAT_ADD(tx_single_collisions);
10882         ESTAT_ADD(tx_mult_collisions);
10883         ESTAT_ADD(tx_deferred);
10884         ESTAT_ADD(tx_excessive_collisions);
10885         ESTAT_ADD(tx_late_collisions);
10886         ESTAT_ADD(tx_collide_2times);
10887         ESTAT_ADD(tx_collide_3times);
10888         ESTAT_ADD(tx_collide_4times);
10889         ESTAT_ADD(tx_collide_5times);
10890         ESTAT_ADD(tx_collide_6times);
10891         ESTAT_ADD(tx_collide_7times);
10892         ESTAT_ADD(tx_collide_8times);
10893         ESTAT_ADD(tx_collide_9times);
10894         ESTAT_ADD(tx_collide_10times);
10895         ESTAT_ADD(tx_collide_11times);
10896         ESTAT_ADD(tx_collide_12times);
10897         ESTAT_ADD(tx_collide_13times);
10898         ESTAT_ADD(tx_collide_14times);
10899         ESTAT_ADD(tx_collide_15times);
10900         ESTAT_ADD(tx_ucast_packets);
10901         ESTAT_ADD(tx_mcast_packets);
10902         ESTAT_ADD(tx_bcast_packets);
10903         ESTAT_ADD(tx_carrier_sense_errors);
10904         ESTAT_ADD(tx_discards);
10905         ESTAT_ADD(tx_errors);
10906
10907         ESTAT_ADD(dma_writeq_full);
10908         ESTAT_ADD(dma_write_prioq_full);
10909         ESTAT_ADD(rxbds_empty);
10910         ESTAT_ADD(rx_discards);
10911         ESTAT_ADD(rx_errors);
10912         ESTAT_ADD(rx_threshold_hit);
10913
10914         ESTAT_ADD(dma_readq_full);
10915         ESTAT_ADD(dma_read_prioq_full);
10916         ESTAT_ADD(tx_comp_queue_full);
10917
10918         ESTAT_ADD(ring_set_send_prod_index);
10919         ESTAT_ADD(ring_status_update);
10920         ESTAT_ADD(nic_irqs);
10921         ESTAT_ADD(nic_avoided_irqs);
10922         ESTAT_ADD(nic_tx_threshold_hit);
10923
10924         ESTAT_ADD(mbuf_lwm_thresh_hit);
10925 }
10926
10927 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10928 {
10929         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10930         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10931
10932         stats->rx_packets = old_stats->rx_packets +
10933                 get_stat64(&hw_stats->rx_ucast_packets) +
10934                 get_stat64(&hw_stats->rx_mcast_packets) +
10935                 get_stat64(&hw_stats->rx_bcast_packets);
10936
10937         stats->tx_packets = old_stats->tx_packets +
10938                 get_stat64(&hw_stats->tx_ucast_packets) +
10939                 get_stat64(&hw_stats->tx_mcast_packets) +
10940                 get_stat64(&hw_stats->tx_bcast_packets);
10941
10942         stats->rx_bytes = old_stats->rx_bytes +
10943                 get_stat64(&hw_stats->rx_octets);
10944         stats->tx_bytes = old_stats->tx_bytes +
10945                 get_stat64(&hw_stats->tx_octets);
10946
10947         stats->rx_errors = old_stats->rx_errors +
10948                 get_stat64(&hw_stats->rx_errors);
10949         stats->tx_errors = old_stats->tx_errors +
10950                 get_stat64(&hw_stats->tx_errors) +
10951                 get_stat64(&hw_stats->tx_mac_errors) +
10952                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10953                 get_stat64(&hw_stats->tx_discards);
10954
10955         stats->multicast = old_stats->multicast +
10956                 get_stat64(&hw_stats->rx_mcast_packets);
10957         stats->collisions = old_stats->collisions +
10958                 get_stat64(&hw_stats->tx_collisions);
10959
10960         stats->rx_length_errors = old_stats->rx_length_errors +
10961                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10962                 get_stat64(&hw_stats->rx_undersize_packets);
10963
10964         stats->rx_over_errors = old_stats->rx_over_errors +
10965                 get_stat64(&hw_stats->rxbds_empty);
10966         stats->rx_frame_errors = old_stats->rx_frame_errors +
10967                 get_stat64(&hw_stats->rx_align_errors);
10968         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10969                 get_stat64(&hw_stats->tx_discards);
10970         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10971                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10972
10973         stats->rx_crc_errors = old_stats->rx_crc_errors +
10974                 tg3_calc_crc_errors(tp);
10975
10976         stats->rx_missed_errors = old_stats->rx_missed_errors +
10977                 get_stat64(&hw_stats->rx_discards);
10978
10979         stats->rx_dropped = tp->rx_dropped;
10980         stats->tx_dropped = tp->tx_dropped;
10981 }
10982
10983 static int tg3_get_regs_len(struct net_device *dev)
10984 {
10985         return TG3_REG_BLK_SIZE;
10986 }
10987
10988 static void tg3_get_regs(struct net_device *dev,
10989                 struct ethtool_regs *regs, void *_p)
10990 {
10991         struct tg3 *tp = netdev_priv(dev);
10992
10993         regs->version = 0;
10994
10995         memset(_p, 0, TG3_REG_BLK_SIZE);
10996
10997         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10998                 return;
10999
11000         tg3_full_lock(tp, 0);
11001
11002         tg3_dump_legacy_regs(tp, (u32 *)_p);
11003
11004         tg3_full_unlock(tp);
11005 }
11006
11007 static int tg3_get_eeprom_len(struct net_device *dev)
11008 {
11009         struct tg3 *tp = netdev_priv(dev);
11010
11011         return tp->nvram_size;
11012 }
11013
11014 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11015 {
11016         struct tg3 *tp = netdev_priv(dev);
11017         int ret;
11018         u8  *pd;
11019         u32 i, offset, len, b_offset, b_count;
11020         __be32 val;
11021
11022         if (tg3_flag(tp, NO_NVRAM))
11023                 return -EINVAL;
11024
11025         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11026                 return -EAGAIN;
11027
11028         offset = eeprom->offset;
11029         len = eeprom->len;
11030         eeprom->len = 0;
11031
11032         eeprom->magic = TG3_EEPROM_MAGIC;
11033
11034         if (offset & 3) {
11035                 /* adjustments to start on required 4 byte boundary */
11036                 b_offset = offset & 3;
11037                 b_count = 4 - b_offset;
11038                 if (b_count > len) {
11039                         /* i.e. offset=1 len=2 */
11040                         b_count = len;
11041                 }
11042                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11043                 if (ret)
11044                         return ret;
11045                 memcpy(data, ((char *)&val) + b_offset, b_count);
11046                 len -= b_count;
11047                 offset += b_count;
11048                 eeprom->len += b_count;
11049         }
11050
11051         /* read bytes up to the last 4 byte boundary */
11052         pd = &data[eeprom->len];
11053         for (i = 0; i < (len - (len & 3)); i += 4) {
11054                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11055                 if (ret) {
11056                         eeprom->len += i;
11057                         return ret;
11058                 }
11059                 memcpy(pd + i, &val, 4);
11060         }
11061         eeprom->len += i;
11062
11063         if (len & 3) {
11064                 /* read last bytes not ending on 4 byte boundary */
11065                 pd = &data[eeprom->len];
11066                 b_count = len & 3;
11067                 b_offset = offset + len - b_count;
11068                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11069                 if (ret)
11070                         return ret;
11071                 memcpy(pd, &val, b_count);
11072                 eeprom->len += b_count;
11073         }
11074         return 0;
11075 }
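/* tg3_get_eeprom() reads in three phases to honor the NVRAM's 4-byte
 * access granularity: a leading partial word (e.g. offset=1 len=2 reads
 * the word at offset 0 and copies bytes 1-2), the aligned middle, and a
 * trailing partial word.
 */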
11076
11077 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11078 {
11079         struct tg3 *tp = netdev_priv(dev);
11080         int ret;
11081         u32 offset, len, b_offset, odd_len;
11082         u8 *buf;
11083         __be32 start, end;
11084
11085         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11086                 return -EAGAIN;
11087
11088         if (tg3_flag(tp, NO_NVRAM) ||
11089             eeprom->magic != TG3_EEPROM_MAGIC)
11090                 return -EINVAL;
11091
11092         offset = eeprom->offset;
11093         len = eeprom->len;
11094
11095         if ((b_offset = (offset & 3))) {
11096                 /* adjustments to start on required 4 byte boundary */
11097                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11098                 if (ret)
11099                         return ret;
11100                 len += b_offset;
11101                 offset &= ~3;
11102                 if (len < 4)
11103                         len = 4;
11104         }
11105
11106         odd_len = 0;
11107         if (len & 3) {
11108                 /* adjustments to end on required 4 byte boundary */
11109                 odd_len = 1;
11110                 len = (len + 3) & ~3;
11111                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11112                 if (ret)
11113                         return ret;
11114         }
11115
11116         buf = data;
11117         if (b_offset || odd_len) {
11118                 buf = kmalloc(len, GFP_KERNEL);
11119                 if (!buf)
11120                         return -ENOMEM;
11121                 if (b_offset)
11122                         memcpy(buf, &start, 4);
11123                 if (odd_len)
11124                         memcpy(buf+len-4, &end, 4);
11125                 memcpy(buf + b_offset, data, eeprom->len);
11126         }
11127
11128         ret = tg3_nvram_write_block(tp, offset, len, buf);
11129
11130         if (buf != data)
11131                 kfree(buf);
11132
11133         return ret;
11134 }
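/* Unaligned NVRAM writes are handled read-modify-write.  Worked example,
 * offset=2 len=3: b_offset=2, so the word at offset 0 is read into
 * "start" and len grows to 5; then len & 3 == 1 sets odd_len, len rounds
 * up to 8 and the word at offset 4 is read into "end".  The bounce buffer
 * holds start | caller bytes | end, and a single aligned 8-byte block is
 * written back at offset 0.
 */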
11135
11136 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11137 {
11138         struct tg3 *tp = netdev_priv(dev);
11139
11140         if (tg3_flag(tp, USE_PHYLIB)) {
11141                 struct phy_device *phydev;
11142                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11143                         return -EAGAIN;
11144                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11145                 return phy_ethtool_gset(phydev, cmd);
11146         }
11147
11148         cmd->supported = (SUPPORTED_Autoneg);
11149
11150         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11151                 cmd->supported |= (SUPPORTED_1000baseT_Half |
11152                                    SUPPORTED_1000baseT_Full);
11153
11154         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11155                 cmd->supported |= (SUPPORTED_100baseT_Half |
11156                                   SUPPORTED_100baseT_Full |
11157                                   SUPPORTED_10baseT_Half |
11158                                   SUPPORTED_10baseT_Full |
11159                                   SUPPORTED_TP);
11160                 cmd->port = PORT_TP;
11161         } else {
11162                 cmd->supported |= SUPPORTED_FIBRE;
11163                 cmd->port = PORT_FIBRE;
11164         }
11165
11166         cmd->advertising = tp->link_config.advertising;
11167         if (tg3_flag(tp, PAUSE_AUTONEG)) {
11168                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11169                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11170                                 cmd->advertising |= ADVERTISED_Pause;
11171                         } else {
11172                                 cmd->advertising |= ADVERTISED_Pause |
11173                                                     ADVERTISED_Asym_Pause;
11174                         }
11175                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11176                         cmd->advertising |= ADVERTISED_Asym_Pause;
11177                 }
11178         }
11179         if (netif_running(dev) && tp->link_up) {
11180                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11181                 cmd->duplex = tp->link_config.active_duplex;
11182                 cmd->lp_advertising = tp->link_config.rmt_adv;
11183                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11184                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11185                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11186                         else
11187                                 cmd->eth_tp_mdix = ETH_TP_MDI;
11188                 }
11189         } else {
11190                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11191                 cmd->duplex = DUPLEX_UNKNOWN;
11192                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11193         }
11194         cmd->phy_address = tp->phy_addr;
11195         cmd->transceiver = XCVR_INTERNAL;
11196         cmd->autoneg = tp->link_config.autoneg;
11197         cmd->maxtxpkt = 0;
11198         cmd->maxrxpkt = 0;
11199         return 0;
11200 }
11201
11202 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11203 {
11204         struct tg3 *tp = netdev_priv(dev);
11205         u32 speed = ethtool_cmd_speed(cmd);
11206
11207         if (tg3_flag(tp, USE_PHYLIB)) {
11208                 struct phy_device *phydev;
11209                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11210                         return -EAGAIN;
11211                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11212                 return phy_ethtool_sset(phydev, cmd);
11213         }
11214
11215         if (cmd->autoneg != AUTONEG_ENABLE &&
11216             cmd->autoneg != AUTONEG_DISABLE)
11217                 return -EINVAL;
11218
11219         if (cmd->autoneg == AUTONEG_DISABLE &&
11220             cmd->duplex != DUPLEX_FULL &&
11221             cmd->duplex != DUPLEX_HALF)
11222                 return -EINVAL;
11223
11224         if (cmd->autoneg == AUTONEG_ENABLE) {
11225                 u32 mask = ADVERTISED_Autoneg |
11226                            ADVERTISED_Pause |
11227                            ADVERTISED_Asym_Pause;
11228
11229                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11230                         mask |= ADVERTISED_1000baseT_Half |
11231                                 ADVERTISED_1000baseT_Full;
11232
11233                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11234                         mask |= ADVERTISED_100baseT_Half |
11235                                 ADVERTISED_100baseT_Full |
11236                                 ADVERTISED_10baseT_Half |
11237                                 ADVERTISED_10baseT_Full |
11238                                 ADVERTISED_TP;
11239                 else
11240                         mask |= ADVERTISED_FIBRE;
11241
11242                 if (cmd->advertising & ~mask)
11243                         return -EINVAL;
11244
11245                 mask &= (ADVERTISED_1000baseT_Half |
11246                          ADVERTISED_1000baseT_Full |
11247                          ADVERTISED_100baseT_Half |
11248                          ADVERTISED_100baseT_Full |
11249                          ADVERTISED_10baseT_Half |
11250                          ADVERTISED_10baseT_Full);
11251
11252                 cmd->advertising &= mask;
11253         } else {
11254                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11255                         if (speed != SPEED_1000)
11256                                 return -EINVAL;
11257
11258                         if (cmd->duplex != DUPLEX_FULL)
11259                                 return -EINVAL;
11260                 } else {
11261                         if (speed != SPEED_100 &&
11262                             speed != SPEED_10)
11263                                 return -EINVAL;
11264                 }
11265         }
11266
11267         tg3_full_lock(tp, 0);
11268
11269         tp->link_config.autoneg = cmd->autoneg;
11270         if (cmd->autoneg == AUTONEG_ENABLE) {
11271                 tp->link_config.advertising = (cmd->advertising |
11272                                               ADVERTISED_Autoneg);
11273                 tp->link_config.speed = SPEED_UNKNOWN;
11274                 tp->link_config.duplex = DUPLEX_UNKNOWN;
11275         } else {
11276                 tp->link_config.advertising = 0;
11277                 tp->link_config.speed = speed;
11278                 tp->link_config.duplex = cmd->duplex;
11279         }
11280
11281         if (netif_running(dev))
11282                 tg3_setup_phy(tp, 1);
11283
11284         tg3_full_unlock(tp);
11285
11286         return 0;
11287 }
11288
11289 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11290 {
11291         struct tg3 *tp = netdev_priv(dev);
11292
11293         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11294         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11295         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11296         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11297 }
11298
11299 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11300 {
11301         struct tg3 *tp = netdev_priv(dev);
11302
11303         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11304                 wol->supported = WAKE_MAGIC;
11305         else
11306                 wol->supported = 0;
11307         wol->wolopts = 0;
11308         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11309                 wol->wolopts = WAKE_MAGIC;
11310         memset(&wol->sopass, 0, sizeof(wol->sopass));
11311 }
11312
11313 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11314 {
11315         struct tg3 *tp = netdev_priv(dev);
11316         struct device *dp = &tp->pdev->dev;
11317
11318         if (wol->wolopts & ~WAKE_MAGIC)
11319                 return -EINVAL;
11320         if ((wol->wolopts & WAKE_MAGIC) &&
11321             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11322                 return -EINVAL;
11323
11324         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11325
11326         spin_lock_bh(&tp->lock);
11327         if (device_may_wakeup(dp))
11328                 tg3_flag_set(tp, WOL_ENABLE);
11329         else
11330                 tg3_flag_clear(tp, WOL_ENABLE);
11331         spin_unlock_bh(&tp->lock);
11332
11333         return 0;
11334 }
11335
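/*
 * Editor's note: a minimal userspace sketch of how tg3_set_wol() above is
 * reached through the legacy ethtool ioctl.  The helper name and interface
 * name are assumptions for illustration.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int enable_wol_magic(const char *ifname)	/* e.g. "eth0" */
{
	struct ethtool_wolinfo wol = {
		.cmd = ETHTOOL_SWOL,
		.wolopts = WAKE_MAGIC,	/* the only option tg3 accepts */
	};
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&wol;
	ret = ioctl(fd, SIOCETHTOOL, &ifr);	/* routed to tg3_set_wol() */
	close(fd);
	return ret;
}
#endif
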
11336 static u32 tg3_get_msglevel(struct net_device *dev)
11337 {
11338         struct tg3 *tp = netdev_priv(dev);
11339         return tp->msg_enable;
11340 }
11341
11342 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11343 {
11344         struct tg3 *tp = netdev_priv(dev);
11345         tp->msg_enable = value;
11346 }
11347
11348 static int tg3_nway_reset(struct net_device *dev)
11349 {
11350         struct tg3 *tp = netdev_priv(dev);
11351         int r;
11352
11353         if (!netif_running(dev))
11354                 return -EAGAIN;
11355
11356         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11357                 return -EINVAL;
11358
11359         if (tg3_flag(tp, USE_PHYLIB)) {
11360                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11361                         return -EAGAIN;
11362                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11363         } else {
11364                 u32 bmcr;
11365
11366                 spin_lock_bh(&tp->lock);
11367                 r = -EINVAL;
11368                 tg3_readphy(tp, MII_BMCR, &bmcr);       /* deliberate double read; discard a possibly stale first value */
11369                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11370                     ((bmcr & BMCR_ANENABLE) ||
11371                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11372                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11373                                                    BMCR_ANENABLE);
11374                         r = 0;
11375                 }
11376                 spin_unlock_bh(&tp->lock);
11377         }
11378
11379         return r;
11380 }
11381
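/*
 * Editor's note: the BMCR write in tg3_nway_reset() above is close to what
 * the generic MII library's mii_nway_restart() does, except that helper
 * only restarts when BMCR_ANENABLE is already set, while tg3 also forces
 * ANENABLE on to cover the parallel-detect case.  Sketch (a mii_if wired
 * to this device's MDIO accessors is an assumption for illustration):
 */
#if 0
	r = mii_nway_restart(&mii_if);	/* BMCR |= BMCR_ANRESTART if ANENABLE */
#endif
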
11382 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11383 {
11384         struct tg3 *tp = netdev_priv(dev);
11385
11386         ering->rx_max_pending = tp->rx_std_ring_mask;
11387         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11388                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11389         else
11390                 ering->rx_jumbo_max_pending = 0;
11391
11392         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11393
11394         ering->rx_pending = tp->rx_pending;
11395         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11396                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11397         else
11398                 ering->rx_jumbo_pending = 0;
11399
11400         ering->tx_pending = tp->napi[0].tx_pending;
11401 }
11402
11403 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11404 {
11405         struct tg3 *tp = netdev_priv(dev);
11406         int i, irq_sync = 0, err = 0;
11407
11408         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11409             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11410             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11411             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11412             (tg3_flag(tp, TSO_BUG) &&
11413              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11414                 return -EINVAL;
11415
11416         if (netif_running(dev)) {
11417                 tg3_phy_stop(tp);
11418                 tg3_netif_stop(tp);
11419                 irq_sync = 1;
11420         }
11421
11422         tg3_full_lock(tp, irq_sync);
11423
11424         tp->rx_pending = ering->rx_pending;
11425
11426         if (tg3_flag(tp, MAX_RXPEND_64) &&
11427             tp->rx_pending > 63)
11428                 tp->rx_pending = 63;
11429         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11430
11431         for (i = 0; i < tp->irq_max; i++)
11432                 tp->napi[i].tx_pending = ering->tx_pending;
11433
11434         if (netif_running(dev)) {
11435                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11436                 err = tg3_restart_hw(tp, 1);
11437                 if (!err)
11438                         tg3_netif_start(tp);
11439         }
11440
11441         tg3_full_unlock(tp);
11442
11443         if (irq_sync && !err)
11444                 tg3_phy_start(tp);
11445
11446         return err;
11447 }
11448
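/*
 * Editor's note: the tx_pending lower bounds rejected above exist because
 * one worst-case skb needs a BD for its linear part plus one per page
 * fragment (MAX_SKB_FRAGS + 1 in total), and TSO_BUG hardware may have to
 * re-segment oversized TSO packets (tg3_tso_bug()), needing extra
 * headroom; the MAX_SKB_FRAGS * 3 factor is a driver heuristic.  A sketch
 * of the check (helper name hypothetical):
 */
#if 0
static bool tx_ring_size_ok(struct tg3 *tp, u32 tx_pending)
{
	if (tx_pending <= MAX_SKB_FRAGS)
		return false;			/* can't hold one full skb */
	if (tg3_flag(tp, TSO_BUG) && tx_pending <= MAX_SKB_FRAGS * 3)
		return false;			/* no room for TSO workaround */
	return tx_pending <= TG3_TX_RING_SIZE - 1;
}
#endif
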
11449 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11450 {
11451         struct tg3 *tp = netdev_priv(dev);
11452
11453         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11454
11455         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11456                 epause->rx_pause = 1;
11457         else
11458                 epause->rx_pause = 0;
11459
11460         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11461                 epause->tx_pause = 1;
11462         else
11463                 epause->tx_pause = 0;
11464 }
11465
11466 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11467 {
11468         struct tg3 *tp = netdev_priv(dev);
11469         int err = 0;
11470
11471         if (tg3_flag(tp, USE_PHYLIB)) {
11472                 u32 newadv;
11473                 struct phy_device *phydev;
11474
11475                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11476
11477                 if (!(phydev->supported & SUPPORTED_Pause) ||
11478                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11479                      (epause->rx_pause != epause->tx_pause)))
11480                         return -EINVAL;
11481
11482                 tp->link_config.flowctrl = 0;
11483                 if (epause->rx_pause) {
11484                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11485
11486                         if (epause->tx_pause) {
11487                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11488                                 newadv = ADVERTISED_Pause;
11489                         } else
11490                                 newadv = ADVERTISED_Pause |
11491                                          ADVERTISED_Asym_Pause;
11492                 } else if (epause->tx_pause) {
11493                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11494                         newadv = ADVERTISED_Asym_Pause;
11495                 } else
11496                         newadv = 0;
11497
11498                 if (epause->autoneg)
11499                         tg3_flag_set(tp, PAUSE_AUTONEG);
11500                 else
11501                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11502
11503                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11504                         u32 oldadv = phydev->advertising &
11505                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11506                         if (oldadv != newadv) {
11507                                 phydev->advertising &=
11508                                         ~(ADVERTISED_Pause |
11509                                           ADVERTISED_Asym_Pause);
11510                                 phydev->advertising |= newadv;
11511                                 if (phydev->autoneg) {
11512                                         /*
11513                                          * Always renegotiate the link to
11514                                          * inform our link partner of our
11515                                          * flow control settings, even if the
11516                                          * flow control is forced.  Let
11517                                          * tg3_adjust_link() do the final
11518                                          * flow control setup.
11519                                          */
11520                                         return phy_start_aneg(phydev);
11521                                 }
11522                         }
11523
11524                         if (!epause->autoneg)
11525                                 tg3_setup_flow_control(tp, 0, 0);
11526                 } else {
11527                         tp->link_config.advertising &=
11528                                         ~(ADVERTISED_Pause |
11529                                           ADVERTISED_Asym_Pause);
11530                         tp->link_config.advertising |= newadv;
11531                 }
11532         } else {
11533                 int irq_sync = 0;
11534
11535                 if (netif_running(dev)) {
11536                         tg3_netif_stop(tp);
11537                         irq_sync = 1;
11538                 }
11539
11540                 tg3_full_lock(tp, irq_sync);
11541
11542                 if (epause->autoneg)
11543                         tg3_flag_set(tp, PAUSE_AUTONEG);
11544                 else
11545                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11546                 if (epause->rx_pause)
11547                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11548                 else
11549                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11550                 if (epause->tx_pause)
11551                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11552                 else
11553                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11554
11555                 if (netif_running(dev)) {
11556                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11557                         err = tg3_restart_hw(tp, 1);
11558                         if (!err)
11559                                 tg3_netif_start(tp);
11560                 }
11561
11562                 tg3_full_unlock(tp);
11563         }
11564
11565         return err;
11566 }
11567
11568 static int tg3_get_sset_count(struct net_device *dev, int sset)
11569 {
11570         switch (sset) {
11571         case ETH_SS_TEST:
11572                 return TG3_NUM_TEST;
11573         case ETH_SS_STATS:
11574                 return TG3_NUM_STATS;
11575         default:
11576                 return -EOPNOTSUPP;
11577         }
11578 }
11579
11580 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11581                          u32 *rules __always_unused)
11582 {
11583         struct tg3 *tp = netdev_priv(dev);
11584
11585         if (!tg3_flag(tp, SUPPORT_MSIX))
11586                 return -EOPNOTSUPP;
11587
11588         switch (info->cmd) {
11589         case ETHTOOL_GRXRINGS:
11590                 if (netif_running(tp->dev))
11591                         info->data = tp->rxq_cnt;
11592                 else {
11593                         info->data = num_online_cpus();
11594                         if (info->data > TG3_RSS_MAX_NUM_QS)
11595                                 info->data = TG3_RSS_MAX_NUM_QS;
11596                 }
11597
11598                 /* The first interrupt vector only
11599                  * handles link interrupts.
11600                  */
11601                 info->data -= 1;
11602                 return 0;
11603
11604         default:
11605                 return -EOPNOTSUPP;
11606         }
11607 }
11608
11609 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11610 {
11611         u32 size = 0;
11612         struct tg3 *tp = netdev_priv(dev);
11613
11614         if (tg3_flag(tp, SUPPORT_MSIX))
11615                 size = TG3_RSS_INDIR_TBL_SIZE;
11616
11617         return size;
11618 }
11619
11620 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11621 {
11622         struct tg3 *tp = netdev_priv(dev);
11623         int i;
11624
11625         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11626                 indir[i] = tp->rss_ind_tbl[i];
11627
11628         return 0;
11629 }
11630
11631 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11632 {
11633         struct tg3 *tp = netdev_priv(dev);
11634         size_t i;
11635
11636         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11637                 tp->rss_ind_tbl[i] = indir[i];
11638
11639         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11640                 return 0;
11641
11642         /* It is legal to write the indirection
11643          * table while the device is running.
11644          */
11645         tg3_full_lock(tp, 0);
11646         tg3_rss_write_indir_tbl(tp);
11647         tg3_full_unlock(tp);
11648
11649         return 0;
11650 }
11651
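/*
 * Editor's note: each of the TG3_RSS_INDIR_TBL_SIZE entries written above
 * names the rx queue a hash bucket maps to, so a uniform default is a
 * simple round-robin over the active queues - the same formula as the
 * core's ethtool_rxfh_indir_default().  Sketch (helper name hypothetical):
 */
#if 0
static void rss_indir_spread(u32 *indir, u32 nqueues)
{
	int i;

	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
		indir[i] = i % nqueues;	/* bucket i -> queue i mod nqueues */
}
#endif
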
11652 static void tg3_get_channels(struct net_device *dev,
11653                              struct ethtool_channels *channel)
11654 {
11655         struct tg3 *tp = netdev_priv(dev);
11656         u32 deflt_qs = netif_get_num_default_rss_queues();
11657
11658         channel->max_rx = tp->rxq_max;
11659         channel->max_tx = tp->txq_max;
11660
11661         if (netif_running(dev)) {
11662                 channel->rx_count = tp->rxq_cnt;
11663                 channel->tx_count = tp->txq_cnt;
11664         } else {
11665                 if (tp->rxq_req)
11666                         channel->rx_count = tp->rxq_req;
11667                 else
11668                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11669
11670                 if (tp->txq_req)
11671                         channel->tx_count = tp->txq_req;
11672                 else
11673                         channel->tx_count = min(deflt_qs, tp->txq_max);
11674         }
11675 }
11676
11677 static int tg3_set_channels(struct net_device *dev,
11678                             struct ethtool_channels *channel)
11679 {
11680         struct tg3 *tp = netdev_priv(dev);
11681
11682         if (!tg3_flag(tp, SUPPORT_MSIX))
11683                 return -EOPNOTSUPP;
11684
11685         if (channel->rx_count > tp->rxq_max ||
11686             channel->tx_count > tp->txq_max)
11687                 return -EINVAL;
11688
11689         tp->rxq_req = channel->rx_count;
11690         tp->txq_req = channel->tx_count;
11691
11692         if (!netif_running(dev))
11693                 return 0;
11694
11695         tg3_stop(tp);
11696
11697         tg3_carrier_off(tp);
11698
11699         tg3_start(tp, true, false, false);
11700
11701         return 0;
11702 }
11703
11704 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11705 {
11706         switch (stringset) {
11707         case ETH_SS_STATS:
11708                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11709                 break;
11710         case ETH_SS_TEST:
11711                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11712                 break;
11713         default:
11714                 WARN_ON(1);     /* unknown stringset; ideally a WARN() with a message */
11715                 break;
11716         }
11717 }
11718
11719 static int tg3_set_phys_id(struct net_device *dev,
11720                             enum ethtool_phys_id_state state)
11721 {
11722         struct tg3 *tp = netdev_priv(dev);
11723
11724         if (!netif_running(tp->dev))
11725                 return -EAGAIN;
11726
11727         switch (state) {
11728         case ETHTOOL_ID_ACTIVE:
11729                 return 1;       /* cycle on/off once per second */
11730
11731         case ETHTOOL_ID_ON:
11732                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11733                      LED_CTRL_1000MBPS_ON |
11734                      LED_CTRL_100MBPS_ON |
11735                      LED_CTRL_10MBPS_ON |
11736                      LED_CTRL_TRAFFIC_OVERRIDE |
11737                      LED_CTRL_TRAFFIC_BLINK |
11738                      LED_CTRL_TRAFFIC_LED);
11739                 break;
11740
11741         case ETHTOOL_ID_OFF:
11742                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11743                      LED_CTRL_TRAFFIC_OVERRIDE);
11744                 break;
11745
11746         case ETHTOOL_ID_INACTIVE:
11747                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11748                 break;
11749         }
11750
11751         return 0;
11752 }
11753
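/*
 * Editor's note: returning 1 from the ETHTOOL_ID_ACTIVE case above asks
 * the ethtool core to call back with ETHTOOL_ID_ON/ETHTOOL_ID_OFF once
 * per second until the user stops.  From userspace the blink is triggered
 * with "ethtool -p ethX [seconds]", or via the legacy ioctl:
 */
#if 0
	struct ethtool_value id = {
		.cmd = ETHTOOL_PHYS_ID,
		.data = 5,	/* blink for 5 seconds; the ioctl blocks meanwhile */
	};
	/* place &id in ifr.ifr_data and issue ioctl(fd, SIOCETHTOOL, &ifr) */
#endif
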
11754 static void tg3_get_ethtool_stats(struct net_device *dev,
11755                                    struct ethtool_stats *estats, u64 *tmp_stats)
11756 {
11757         struct tg3 *tp = netdev_priv(dev);
11758
11759         if (tp->hw_stats)
11760                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11761         else
11762                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11763 }
11764
11765 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11766 {
11767         int i;
11768         __be32 *buf;
11769         u32 offset = 0, len = 0;
11770         u32 magic, val;
11771
11772         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11773                 return NULL;
11774
11775         if (magic == TG3_EEPROM_MAGIC) {
11776                 for (offset = TG3_NVM_DIR_START;
11777                      offset < TG3_NVM_DIR_END;
11778                      offset += TG3_NVM_DIRENT_SIZE) {
11779                         if (tg3_nvram_read(tp, offset, &val))
11780                                 return NULL;
11781
11782                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11783                             TG3_NVM_DIRTYPE_EXTVPD)
11784                                 break;
11785                 }
11786
11787                 if (offset != TG3_NVM_DIR_END) {
11788                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11789                         if (tg3_nvram_read(tp, offset + 4, &offset))
11790                                 return NULL;
11791
11792                         offset = tg3_nvram_logical_addr(tp, offset);
11793                 }
11794         }
11795
11796         if (!offset || !len) {
11797                 offset = TG3_NVM_VPD_OFF;
11798                 len = TG3_NVM_VPD_LEN;
11799         }
11800
11801         buf = kmalloc(len, GFP_KERNEL);
11802         if (buf == NULL)
11803                 return NULL;
11804
11805         if (magic == TG3_EEPROM_MAGIC) {
11806                 for (i = 0; i < len; i += 4) {
11807                         /* The data is in little-endian format in NVRAM.
11808                          * Use the big-endian read routines to preserve
11809                          * the byte order as it exists in NVRAM.
11810                          */
11811                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11812                                 goto error;
11813                 }
11814         } else {
11815                 u8 *ptr;
11816                 ssize_t cnt;
11817                 unsigned int pos = 0;
11818
11819                 ptr = (u8 *)&buf[0];
11820                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {  /* up to 3 attempts to read the full VPD */
11821                         cnt = pci_read_vpd(tp->pdev, pos,
11822                                            len - pos, ptr);
11823                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11824                                 cnt = 0;
11825                         else if (cnt < 0)
11826                                 goto error;
11827                 }
11828                 if (pos != len)
11829                         goto error;
11830         }
11831
11832         *vpdlen = len;
11833
11834         return buf;
11835
11836 error:
11837         kfree(buf);
11838         return NULL;
11839 }
11840
11841 #define NVRAM_TEST_SIZE 0x100
11842 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11843 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11844 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11845 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11846 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11847 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11848 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11849 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11850
11851 static int tg3_test_nvram(struct tg3 *tp)
11852 {
11853         u32 csum, magic, len;
11854         __be32 *buf;
11855         int i, j, k, err = 0, size;
11856
11857         if (tg3_flag(tp, NO_NVRAM))
11858                 return 0;
11859
11860         if (tg3_nvram_read(tp, 0, &magic) != 0)
11861                 return -EIO;
11862
11863         if (magic == TG3_EEPROM_MAGIC)
11864                 size = NVRAM_TEST_SIZE;
11865         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11866                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11867                     TG3_EEPROM_SB_FORMAT_1) {
11868                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11869                         case TG3_EEPROM_SB_REVISION_0:
11870                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11871                                 break;
11872                         case TG3_EEPROM_SB_REVISION_2:
11873                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11874                                 break;
11875                         case TG3_EEPROM_SB_REVISION_3:
11876                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11877                                 break;
11878                         case TG3_EEPROM_SB_REVISION_4:
11879                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11880                                 break;
11881                         case TG3_EEPROM_SB_REVISION_5:
11882                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11883                                 break;
11884                         case TG3_EEPROM_SB_REVISION_6:
11885                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11886                                 break;
11887                         default:
11888                                 return -EIO;
11889                         }
11890                 } else
11891                         return 0;
11892         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11893                 size = NVRAM_SELFBOOT_HW_SIZE;
11894         else
11895                 return -EIO;
11896
11897         buf = kmalloc(size, GFP_KERNEL);
11898         if (buf == NULL)
11899                 return -ENOMEM;
11900
11901         err = -EIO;
11902         for (i = 0, j = 0; i < size; i += 4, j++) {
11903                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11904                 if (err)
11905                         break;
11906         }
11907         if (i < size)
11908                 goto out;
11909
11910         /* Selfboot format */
11911         magic = be32_to_cpu(buf[0]);
11912         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11913             TG3_EEPROM_MAGIC_FW) {
11914                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11915
11916                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11917                     TG3_EEPROM_SB_REVISION_2) {
11918                         /* For rev 2, the csum doesn't include the MBA. */
11919                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11920                                 csum8 += buf8[i];
11921                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11922                                 csum8 += buf8[i];
11923                 } else {
11924                         for (i = 0; i < size; i++)
11925                                 csum8 += buf8[i];
11926                 }
11927
11928                 if (csum8 == 0) {
11929                         err = 0;
11930                         goto out;
11931                 }
11932
11933                 err = -EIO;
11934                 goto out;
11935         }
11936
11937         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11938             TG3_EEPROM_MAGIC_HW) {
11939                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11940                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11941                 u8 *buf8 = (u8 *) buf;
11942
11943                 /* Separate the parity bits and the data bytes.  */
11944                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11945                         if ((i == 0) || (i == 8)) {
11946                                 int l;
11947                                 u8 msk;
11948
11949                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11950                                         parity[k++] = buf8[i] & msk;
11951                                 i++;
11952                         } else if (i == 16) {
11953                                 int l;
11954                                 u8 msk;
11955
11956                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11957                                         parity[k++] = buf8[i] & msk;
11958                                 i++;
11959
11960                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11961                                         parity[k++] = buf8[i] & msk;
11962                                 i++;
11963                         }
11964                         data[j++] = buf8[i];
11965                 }
11966
11967                 err = -EIO;
11968                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11969                         u8 hw8 = hweight8(data[i]);
11970
11971                         if ((hw8 & 0x1) && parity[i])
11972                                 goto out;
11973                         else if (!(hw8 & 0x1) && !parity[i])
11974                                 goto out;
11975                 }
11976                 err = 0;
11977                 goto out;
11978         }
11979
11980         err = -EIO;
11981
11982         /* Bootstrap checksum at offset 0x10 */
11983         csum = calc_crc((unsigned char *) buf, 0x10);
11984         if (csum != le32_to_cpu(buf[0x10/4]))
11985                 goto out;
11986
11987         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11988         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11989         if (csum != le32_to_cpu(buf[0xfc/4]))
11990                 goto out;
11991
11992         kfree(buf);
11993
11994         buf = tg3_vpd_readblock(tp, &len);
11995         if (!buf)
11996                 return -ENOMEM;
11997
11998         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11999         if (i > 0) {
12000                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12001                 if (j < 0)
12002                         goto out;
12003
12004                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12005                         goto out;
12006
12007                 i += PCI_VPD_LRDT_TAG_SIZE;
12008                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12009                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12010                 if (j > 0) {
12011                         u8 csum8 = 0;
12012
12013                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12014
12015                         for (i = 0; i <= j; i++)
12016                                 csum8 += ((u8 *)buf)[i];
12017
12018                         if (csum8)
12019                                 goto out;
12020                 }
12021         }
12022
12023         err = 0;
12024
12025 out:
12026         kfree(buf);
12027         return err;
12028 }
12029
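/*
 * Editor's note: tg3_test_nvram() above verifies two different selfboot
 * integrity schemes.  Format-1 images must byte-sum to 0 mod 256 (rev 2
 * skips the 4-byte MBA word); HW-format images store an odd-parity bit
 * per covered data byte, which is what the hweight8() test checks.
 * Minimal sketches (helper names hypothetical):
 */
#if 0
static bool selfboot_sum_ok(const u8 *img, int size)
{
	u8 sum = 0;
	int i;

	for (i = 0; i < size; i++)
		sum += img[i];
	return sum == 0;	/* checksum byte makes the total wrap to zero */
}

static bool odd_parity_ok(u8 data, bool parity_bit)
{
	/* data bits plus the parity bit must hold an odd number of 1s */
	return (hweight8(data) & 1) != parity_bit;
}
#endif
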
12030 #define TG3_SERDES_TIMEOUT_SEC  2
12031 #define TG3_COPPER_TIMEOUT_SEC  6
12032
12033 static int tg3_test_link(struct tg3 *tp)
12034 {
12035         int i, max;
12036
12037         if (!netif_running(tp->dev))
12038                 return -ENODEV;
12039
12040         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12041                 max = TG3_SERDES_TIMEOUT_SEC;
12042         else
12043                 max = TG3_COPPER_TIMEOUT_SEC;
12044
12045         for (i = 0; i < max; i++) {
12046                 if (tp->link_up)
12047                         return 0;
12048
12049                 if (msleep_interruptible(1000))
12050                         break;
12051         }
12052
12053         return -EIO;
12054 }
12055
12056 /* Only test the commonly used registers */
12057 static int tg3_test_registers(struct tg3 *tp)
12058 {
12059         int i, is_5705, is_5750;
12060         u32 offset, read_mask, write_mask, val, save_val, read_val;
12061         static struct {
12062                 u16 offset;
12063                 u16 flags;
12064 #define TG3_FL_5705     0x1
12065 #define TG3_FL_NOT_5705 0x2
12066 #define TG3_FL_NOT_5788 0x4
12067 #define TG3_FL_NOT_5750 0x8
12068                 u32 read_mask;
12069                 u32 write_mask;
12070         } reg_tbl[] = {
12071                 /* MAC Control Registers */
12072                 { MAC_MODE, TG3_FL_NOT_5705,
12073                         0x00000000, 0x00ef6f8c },
12074                 { MAC_MODE, TG3_FL_5705,
12075                         0x00000000, 0x01ef6b8c },
12076                 { MAC_STATUS, TG3_FL_NOT_5705,
12077                         0x03800107, 0x00000000 },
12078                 { MAC_STATUS, TG3_FL_5705,
12079                         0x03800100, 0x00000000 },
12080                 { MAC_ADDR_0_HIGH, 0x0000,
12081                         0x00000000, 0x0000ffff },
12082                 { MAC_ADDR_0_LOW, 0x0000,
12083                         0x00000000, 0xffffffff },
12084                 { MAC_RX_MTU_SIZE, 0x0000,
12085                         0x00000000, 0x0000ffff },
12086                 { MAC_TX_MODE, 0x0000,
12087                         0x00000000, 0x00000070 },
12088                 { MAC_TX_LENGTHS, 0x0000,
12089                         0x00000000, 0x00003fff },
12090                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12091                         0x00000000, 0x000007fc },
12092                 { MAC_RX_MODE, TG3_FL_5705,
12093                         0x00000000, 0x000007dc },
12094                 { MAC_HASH_REG_0, 0x0000,
12095                         0x00000000, 0xffffffff },
12096                 { MAC_HASH_REG_1, 0x0000,
12097                         0x00000000, 0xffffffff },
12098                 { MAC_HASH_REG_2, 0x0000,
12099                         0x00000000, 0xffffffff },
12100                 { MAC_HASH_REG_3, 0x0000,
12101                         0x00000000, 0xffffffff },
12102
12103                 /* Receive Data and Receive BD Initiator Control Registers. */
12104                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12105                         0x00000000, 0xffffffff },
12106                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12107                         0x00000000, 0xffffffff },
12108                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12109                         0x00000000, 0x00000003 },
12110                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12111                         0x00000000, 0xffffffff },
12112                 { RCVDBDI_STD_BD+0, 0x0000,
12113                         0x00000000, 0xffffffff },
12114                 { RCVDBDI_STD_BD+4, 0x0000,
12115                         0x00000000, 0xffffffff },
12116                 { RCVDBDI_STD_BD+8, 0x0000,
12117                         0x00000000, 0xffff0002 },
12118                 { RCVDBDI_STD_BD+0xc, 0x0000,
12119                         0x00000000, 0xffffffff },
12120
12121                 /* Receive BD Initiator Control Registers. */
12122                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12123                         0x00000000, 0xffffffff },
12124                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12125                         0x00000000, 0x000003ff },
12126                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12127                         0x00000000, 0xffffffff },
12128
12129                 /* Host Coalescing Control Registers. */
12130                 { HOSTCC_MODE, TG3_FL_NOT_5705,
12131                         0x00000000, 0x00000004 },
12132                 { HOSTCC_MODE, TG3_FL_5705,
12133                         0x00000000, 0x000000f6 },
12134                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12135                         0x00000000, 0xffffffff },
12136                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12137                         0x00000000, 0x000003ff },
12138                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12139                         0x00000000, 0xffffffff },
12140                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12141                         0x00000000, 0x000003ff },
12142                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12143                         0x00000000, 0xffffffff },
12144                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12145                         0x00000000, 0x000000ff },
12146                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12147                         0x00000000, 0xffffffff },
12148                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12149                         0x00000000, 0x000000ff },
12150                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12151                         0x00000000, 0xffffffff },
12152                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12153                         0x00000000, 0xffffffff },
12154                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12155                         0x00000000, 0xffffffff },
12156                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12157                         0x00000000, 0x000000ff },
12158                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12159                         0x00000000, 0xffffffff },
12160                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12161                         0x00000000, 0x000000ff },
12162                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12163                         0x00000000, 0xffffffff },
12164                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12165                         0x00000000, 0xffffffff },
12166                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12167                         0x00000000, 0xffffffff },
12168                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12169                         0x00000000, 0xffffffff },
12170                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12171                         0x00000000, 0xffffffff },
12172                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12173                         0xffffffff, 0x00000000 },
12174                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12175                         0xffffffff, 0x00000000 },
12176
12177                 /* Buffer Manager Control Registers. */
12178                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12179                         0x00000000, 0x007fff80 },
12180                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12181                         0x00000000, 0x007fffff },
12182                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12183                         0x00000000, 0x0000003f },
12184                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12185                         0x00000000, 0x000001ff },
12186                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12187                         0x00000000, 0x000001ff },
12188                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12189                         0xffffffff, 0x00000000 },
12190                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12191                         0xffffffff, 0x00000000 },
12192
12193                 /* Mailbox Registers */
12194                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12195                         0x00000000, 0x000001ff },
12196                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12197                         0x00000000, 0x000001ff },
12198                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12199                         0x00000000, 0x000007ff },
12200                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12201                         0x00000000, 0x000001ff },
12202
12203                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12204         };
12205
12206         is_5705 = is_5750 = 0;
12207         if (tg3_flag(tp, 5705_PLUS)) {
12208                 is_5705 = 1;
12209                 if (tg3_flag(tp, 5750_PLUS))
12210                         is_5750 = 1;
12211         }
12212
12213         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12214                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12215                         continue;
12216
12217                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12218                         continue;
12219
12220                 if (tg3_flag(tp, IS_5788) &&
12221                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
12222                         continue;
12223
12224                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12225                         continue;
12226
12227                 offset = (u32) reg_tbl[i].offset;
12228                 read_mask = reg_tbl[i].read_mask;
12229                 write_mask = reg_tbl[i].write_mask;
12230
12231                 /* Save the original register content */
12232                 save_val = tr32(offset);
12233
12234                 /* Determine the read-only value. */
12235                 read_val = save_val & read_mask;
12236
12237                 /* Write zero to the register, then make sure the read-only bits
12238                  * are not changed and the read/write bits are all zeros.
12239                  */
12240                 tw32(offset, 0);
12241
12242                 val = tr32(offset);
12243
12244                 /* Test the read-only and read/write bits. */
12245                 if (((val & read_mask) != read_val) || (val & write_mask))
12246                         goto out;
12247
12248                 /* Write ones to all the bits defined by RdMask and WrMask, then
12249                  * make sure the read-only bits are not changed and the
12250                  * read/write bits are all ones.
12251                  */
12252                 tw32(offset, read_mask | write_mask);
12253
12254                 val = tr32(offset);
12255
12256                 /* Test the read-only bits. */
12257                 if ((val & read_mask) != read_val)
12258                         goto out;
12259
12260                 /* Test the read/write bits. */
12261                 if ((val & write_mask) != write_mask)
12262                         goto out;
12263
12264                 tw32(offset, save_val);
12265         }
12266
12267         return 0;
12268
12269 out:
12270         if (netif_msg_hw(tp))
12271                 netdev_err(tp->dev,
12272                            "Register test failed at offset %x\n", offset);
12273         tw32(offset, save_val);
12274         return -EIO;
12275 }
12276
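/*
 * Editor's note: the loop above applies a standard masked register test:
 * read-only bits (read_mask) must survive writes unchanged, and writable
 * bits (write_mask) must accept both all-zeros and all-ones.  Condensed
 * sketch, with hypothetical reg_rd()/reg_wr() standing in for tr32()/tw32():
 */
#if 0
static int test_one_reg(u32 off, u32 read_mask, u32 write_mask)
{
	u32 save = reg_rd(off), ro = save & read_mask, val;
	int err = -EIO;

	reg_wr(off, 0);				/* pass 1: all zeros */
	val = reg_rd(off);
	if ((val & read_mask) != ro || (val & write_mask))
		goto restore;

	reg_wr(off, read_mask | write_mask);	/* pass 2: all ones */
	val = reg_rd(off);
	if ((val & read_mask) != ro || (val & write_mask) != write_mask)
		goto restore;

	err = 0;
restore:
	reg_wr(off, save);			/* always restore the register */
	return err;
}
#endif
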
12277 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12278 {
12279         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
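        /* Editor's note: all-zeros and all-ones catch stuck-at bits, while
         * the alternating 0xaa55a55a pattern flips neighbouring bits and
         * nibbles to expose shorted or coupled lines.
         */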
12280         int i;
12281         u32 j;
12282
12283         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12284                 for (j = 0; j < len; j += 4) {
12285                         u32 val;
12286
12287                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12288                         tg3_read_mem(tp, offset + j, &val);
12289                         if (val != test_pattern[i])
12290                                 return -EIO;
12291                 }
12292         }
12293         return 0;
12294 }
12295
12296 static int tg3_test_memory(struct tg3 *tp)
12297 {
12298         static struct mem_entry {
12299                 u32 offset;
12300                 u32 len;
12301         } mem_tbl_570x[] = {
12302                 { 0x00000000, 0x00b50},
12303                 { 0x00002000, 0x1c000},
12304                 { 0xffffffff, 0x00000}
12305         }, mem_tbl_5705[] = {
12306                 { 0x00000100, 0x0000c},
12307                 { 0x00000200, 0x00008},
12308                 { 0x00004000, 0x00800},
12309                 { 0x00006000, 0x01000},
12310                 { 0x00008000, 0x02000},
12311                 { 0x00010000, 0x0e000},
12312                 { 0xffffffff, 0x00000}
12313         }, mem_tbl_5755[] = {
12314                 { 0x00000200, 0x00008},
12315                 { 0x00004000, 0x00800},
12316                 { 0x00006000, 0x00800},
12317                 { 0x00008000, 0x02000},
12318                 { 0x00010000, 0x0c000},
12319                 { 0xffffffff, 0x00000}
12320         }, mem_tbl_5906[] = {
12321                 { 0x00000200, 0x00008},
12322                 { 0x00004000, 0x00400},
12323                 { 0x00006000, 0x00400},
12324                 { 0x00008000, 0x01000},
12325                 { 0x00010000, 0x01000},
12326                 { 0xffffffff, 0x00000}
12327         }, mem_tbl_5717[] = {
12328                 { 0x00000200, 0x00008},
12329                 { 0x00010000, 0x0a000},
12330                 { 0x00020000, 0x13c00},
12331                 { 0xffffffff, 0x00000}
12332         }, mem_tbl_57765[] = {
12333                 { 0x00000200, 0x00008},
12334                 { 0x00004000, 0x00800},
12335                 { 0x00006000, 0x09800},
12336                 { 0x00010000, 0x0a000},
12337                 { 0xffffffff, 0x00000}
12338         };
12339         struct mem_entry *mem_tbl;
12340         int err = 0;
12341         int i;
12342
12343         if (tg3_flag(tp, 5717_PLUS))
12344                 mem_tbl = mem_tbl_5717;
12345         else if (tg3_flag(tp, 57765_CLASS))
12346                 mem_tbl = mem_tbl_57765;
12347         else if (tg3_flag(tp, 5755_PLUS))
12348                 mem_tbl = mem_tbl_5755;
12349         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12350                 mem_tbl = mem_tbl_5906;
12351         else if (tg3_flag(tp, 5705_PLUS))
12352                 mem_tbl = mem_tbl_5705;
12353         else
12354                 mem_tbl = mem_tbl_570x;
12355
12356         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12357                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12358                 if (err)
12359                         break;
12360         }
12361
12362         return err;
12363 }
12364
12365 #define TG3_TSO_MSS             500
12366
12367 #define TG3_TSO_IP_HDR_LEN      20
12368 #define TG3_TSO_TCP_HDR_LEN     20
12369 #define TG3_TSO_TCP_OPT_LEN     12
12370
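/*
 * Editor's note: byte-for-byte, the canned header below is the tail of an
 * Ethernet header (ethertype 0x0800 = IPv4; the 12 address bytes are
 * written separately), a 20-byte IPv4 header (version 4/IHL 5, DF set,
 * TTL 64, protocol 6 = TCP, 10.0.0.1 -> 10.0.0.2, tot_len patched later
 * by tg3_run_loopback(), header checksum left zero), and a 32-byte TCP
 * header (data offset 8, ACK set, window 0x1000, checksum pre-seeded and
 * cleared again on HW_TSO parts) whose 12 option bytes are NOP, NOP and
 * a 10-byte timestamp option carrying 0x11 filler.
 */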
12371 static const u8 tg3_tso_header[] = {
12372 0x08, 0x00,
12373 0x45, 0x00, 0x00, 0x00,
12374 0x00, 0x00, 0x40, 0x00,
12375 0x40, 0x06, 0x00, 0x00,
12376 0x0a, 0x00, 0x00, 0x01,
12377 0x0a, 0x00, 0x00, 0x02,
12378 0x0d, 0x00, 0xe0, 0x00,
12379 0x00, 0x00, 0x01, 0x00,
12380 0x00, 0x00, 0x02, 0x00,
12381 0x80, 0x10, 0x10, 0x00,
12382 0x14, 0x09, 0x00, 0x00,
12383 0x01, 0x01, 0x08, 0x0a,
12384 0x11, 0x11, 0x11, 0x11,
12385 0x11, 0x11, 0x11, 0x11,
12386 };
12387
12388 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12389 {
12390         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12391         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12392         u32 budget;
12393         struct sk_buff *skb;
12394         u8 *tx_data, *rx_data;
12395         dma_addr_t map;
12396         int num_pkts, tx_len, rx_len, i, err;
12397         struct tg3_rx_buffer_desc *desc;
12398         struct tg3_napi *tnapi, *rnapi;
12399         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12400
12401         tnapi = &tp->napi[0];
12402         rnapi = &tp->napi[0];
12403         if (tp->irq_cnt > 1) {
12404                 if (tg3_flag(tp, ENABLE_RSS))
12405                         rnapi = &tp->napi[1];
12406                 if (tg3_flag(tp, ENABLE_TSS))
12407                         tnapi = &tp->napi[1];
12408         }
12409         coal_now = tnapi->coal_now | rnapi->coal_now;
12410
12411         err = -EIO;
12412
12413         tx_len = pktsz;
12414         skb = netdev_alloc_skb(tp->dev, tx_len);
12415         if (!skb)
12416                 return -ENOMEM;
12417
12418         tx_data = skb_put(skb, tx_len);
12419         memcpy(tx_data, tp->dev->dev_addr, 6);  /* dst MAC = our own address so the looped-back frame passes rx filtering */
12420         memset(tx_data + 6, 0x0, 8);
12421
12422         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12423
12424         if (tso_loopback) {
12425                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12426
12427                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12428                               TG3_TSO_TCP_OPT_LEN;
12429
12430                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12431                        sizeof(tg3_tso_header));
12432                 mss = TG3_TSO_MSS;
12433
12434                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12435                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12436
12437                 /* Set the total length field in the IP header */
12438                 iph->tot_len = htons((u16)(mss + hdr_len));
12439
12440                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12441                               TXD_FLAG_CPU_POST_DMA);
12442
12443                 if (tg3_flag(tp, HW_TSO_1) ||
12444                     tg3_flag(tp, HW_TSO_2) ||
12445                     tg3_flag(tp, HW_TSO_3)) {
12446                         struct tcphdr *th;
12447                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12448                         th = (struct tcphdr *)&tx_data[val];
12449                         th->check = 0;
12450                 } else
12451                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12452
12453                 if (tg3_flag(tp, HW_TSO_3)) {
12454                         mss |= (hdr_len & 0xc) << 12;
12455                         if (hdr_len & 0x10)
12456                                 base_flags |= 0x00000010;
12457                         base_flags |= (hdr_len & 0x3e0) << 5;
12458                 } else if (tg3_flag(tp, HW_TSO_2))
12459                         mss |= hdr_len << 9;
12460                 else if (tg3_flag(tp, HW_TSO_1) ||
12461                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12462                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12463                 } else {
12464                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12465                 }
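                /* Editor's note (worked example): with the canned header,
                 * hdr_len = 20 + 20 + 12 = 52 = 0x34.  On HW_TSO_3 parts
                 * the length is scattered across the BD: bits 2-3
                 * (0x34 & 0xc) land in mss bits 14-15, bit 4 becomes
                 * base_flags bit 4 (0x10), and bits 5-9 (0x34 & 0x3e0)
                 * shift into base_flags bits 10-14.
                 */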
12466
12467                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12468         } else {
12469                 num_pkts = 1;
12470                 data_off = ETH_HLEN;
12471
12472                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12473                     tx_len > VLAN_ETH_FRAME_LEN)
12474                         base_flags |= TXD_FLAG_JMB_PKT;
12475         }
12476
12477         for (i = data_off; i < tx_len; i++)
12478                 tx_data[i] = (u8) (i & 0xff);
12479
12480         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12481         if (pci_dma_mapping_error(tp->pdev, map)) {
12482                 dev_kfree_skb(skb);
12483                 return -EIO;
12484         }
12485
12486         val = tnapi->tx_prod;
12487         tnapi->tx_buffers[val].skb = skb;
12488         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12489
12490         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12491                rnapi->coal_now);
12492
12493         udelay(10);
12494
12495         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12496
12497         budget = tg3_tx_avail(tnapi);
12498         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12499                             base_flags | TXD_FLAG_END, mss, 0)) {
12500                 tnapi->tx_buffers[val].skb = NULL;
12501                 dev_kfree_skb(skb);
12502                 return -EIO;
12503         }
12504
12505         tnapi->tx_prod++;
12506
12507         /* Sync BD data before updating mailbox */
12508         wmb();
12509
12510         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12511         tr32_mailbox(tnapi->prodmbox);
12512
12513         udelay(10);
12514
12515         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
12516         for (i = 0; i < 35; i++) {
12517                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12518                        coal_now);
12519
12520                 udelay(10);
12521
12522                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12523                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12524                 if ((tx_idx == tnapi->tx_prod) &&
12525                     (rx_idx == (rx_start_idx + num_pkts)))
12526                         break;
12527         }
12528
12529         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12530         dev_kfree_skb(skb);
12531
12532         if (tx_idx != tnapi->tx_prod)
12533                 goto out;
12534
12535         if (rx_idx != rx_start_idx + num_pkts)
12536                 goto out;
12537
12538         val = data_off;
12539         while (rx_idx != rx_start_idx) {
12540                 desc = &rnapi->rx_rcb[rx_start_idx++];
12541                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12542                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12543
12544                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12545                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12546                         goto out;
12547
12548                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12549                          - ETH_FCS_LEN;
12550
12551                 if (!tso_loopback) {
12552                         if (rx_len != tx_len)
12553                                 goto out;
12554
12555                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12556                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12557                                         goto out;
12558                         } else {
12559                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12560                                         goto out;
12561                         }
12562                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12563                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12564                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12565                         goto out;
12566                 }
12567
12568                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12569                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12570                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12571                                              mapping);
12572                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12573                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12574                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12575                                              mapping);
12576                 } else
12577                         goto out;
12578
12579                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12580                                             PCI_DMA_FROMDEVICE);
12581
12582                 rx_data += TG3_RX_OFFSET(tp);
12583                 for (i = data_off; i < rx_len; i++, val++) {
12584                         if (*(rx_data + i) != (u8) (val & 0xff))
12585                                 goto out;
12586                 }
12587         }
12588
12589         err = 0;
12590
12591         /* tg3_free_rings will unmap and free the rx_data */
12592 out:
12593         return err;
12594 }
12595
12596 #define TG3_STD_LOOPBACK_FAILED         1
12597 #define TG3_JMB_LOOPBACK_FAILED         2
12598 #define TG3_TSO_LOOPBACK_FAILED         4
12599 #define TG3_LOOPBACK_FAILED \
12600         (TG3_STD_LOOPBACK_FAILED | \
12601          TG3_JMB_LOOPBACK_FAILED | \
12602          TG3_TSO_LOOPBACK_FAILED)
12603
12604 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12605 {
12606         int err = -EIO;
12607         u32 eee_cap;
12608         u32 jmb_pkt_sz = 9000;
12609
12610         if (tp->dma_limit)
12611                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12612
12613         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12614         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12615
12616         if (!netif_running(tp->dev)) {
12617                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12618                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12619                 if (do_extlpbk)
12620                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12621                 goto done;
12622         }
12623
12624         err = tg3_reset_hw(tp, 1);
12625         if (err) {
12626                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12627                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12628                 if (do_extlpbk)
12629                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12630                 goto done;
12631         }
12632
12633         if (tg3_flag(tp, ENABLE_RSS)) {
12634                 int i;
12635
12636                 /* Reroute all rx packets to the 1st queue */
12637                 for (i = MAC_RSS_INDIR_TBL_0;
12638                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12639                         tw32(i, 0x0);
12640         }
12641
12642         /* HW errata - mac loopback fails in some cases on 5780.
12643          * Normal traffic and PHY loopback are not affected by
12644          * errata.  Also, the MAC loopback test is deprecated for
12645          * all newer ASIC revisions.
12646          */
12647         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12648             !tg3_flag(tp, CPMU_PRESENT)) {
12649                 tg3_mac_loopback(tp, true);
12650
12651                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12652                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12653
12654                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12655                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12656                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12657
12658                 tg3_mac_loopback(tp, false);
12659         }
12660
12661         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12662             !tg3_flag(tp, USE_PHYLIB)) {
12663                 int i;
12664
12665                 tg3_phy_lpbk_set(tp, 0, false);
12666
12667                 /* Wait for link */
12668                 for (i = 0; i < 100; i++) {
12669                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12670                                 break;
12671                         mdelay(1);
12672                 }
12673
12674                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12675                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12676                 if (tg3_flag(tp, TSO_CAPABLE) &&
12677                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12678                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12679                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12680                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12681                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12682
12683                 if (do_extlpbk) {
12684                         tg3_phy_lpbk_set(tp, 0, true);
12685
12686                         /* All link indications report up, but the hardware
12687                          * isn't really ready for about 20 msec.  Double it
12688                          * to be sure.
12689                          */
12690                         mdelay(40);
12691
12692                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12693                                 data[TG3_EXT_LOOPB_TEST] |=
12694                                                         TG3_STD_LOOPBACK_FAILED;
12695                         if (tg3_flag(tp, TSO_CAPABLE) &&
12696                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12697                                 data[TG3_EXT_LOOPB_TEST] |=
12698                                                         TG3_TSO_LOOPBACK_FAILED;
12699                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12700                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12701                                 data[TG3_EXT_LOOPB_TEST] |=
12702                                                         TG3_JMB_LOOPBACK_FAILED;
12703                 }
12704
12705                 /* Re-enable gphy autopowerdown. */
12706                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12707                         tg3_phy_toggle_apd(tp, true);
12708         }
12709
12710         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12711                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
12712
12713 done:
12714         tp->phy_flags |= eee_cap;
12715
12716         return err;
12717 }
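
/* Editor's note (illustrative only): the ENABLE_RSS block in
 * tg3_test_loopback() zeroes every RSS indirection table entry so that,
 * for the duration of the test, every receive hash resolves to queue 0
 * and the loopback packet is guaranteed to arrive on the ring being
 * polled.  Conceptually the hardware lookup behaves like this
 * (hypothetical software model, not a driver function):
 */
static inline u32 rss_indir_pick_queue(const u8 *indir_tbl, u32 tbl_size,
				       u32 rx_hash)
{
	/* the hash indexes the table; the entry names the target rx queue */
	return indir_tbl[rx_hash % tbl_size];
}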
12718
12719 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12720                           u64 *data)
12721 {
12722         struct tg3 *tp = netdev_priv(dev);
12723         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12724
12725         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12726             tg3_power_up(tp)) {
12727                 etest->flags |= ETH_TEST_FL_FAILED;
12728                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12729                 return;
12730         }
12731
12732         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12733
12734         if (tg3_test_nvram(tp) != 0) {
12735                 etest->flags |= ETH_TEST_FL_FAILED;
12736                 data[TG3_NVRAM_TEST] = 1;
12737         }
12738         if (!doextlpbk && tg3_test_link(tp)) {
12739                 etest->flags |= ETH_TEST_FL_FAILED;
12740                 data[TG3_LINK_TEST] = 1;
12741         }
12742         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12743                 int err, err2 = 0, irq_sync = 0;
12744
12745                 if (netif_running(dev)) {
12746                         tg3_phy_stop(tp);
12747                         tg3_netif_stop(tp);
12748                         irq_sync = 1;
12749                 }
12750
12751                 tg3_full_lock(tp, irq_sync);
12752                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12753                 err = tg3_nvram_lock(tp);
12754                 tg3_halt_cpu(tp, RX_CPU_BASE);
12755                 if (!tg3_flag(tp, 5705_PLUS))
12756                         tg3_halt_cpu(tp, TX_CPU_BASE);
12757                 if (!err)
12758                         tg3_nvram_unlock(tp);
12759
12760                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12761                         tg3_phy_reset(tp);
12762
12763                 if (tg3_test_registers(tp) != 0) {
12764                         etest->flags |= ETH_TEST_FL_FAILED;
12765                         data[TG3_REGISTER_TEST] = 1;
12766                 }
12767
12768                 if (tg3_test_memory(tp) != 0) {
12769                         etest->flags |= ETH_TEST_FL_FAILED;
12770                         data[TG3_MEMORY_TEST] = 1;
12771                 }
12772
12773                 if (doextlpbk)
12774                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12775
12776                 if (tg3_test_loopback(tp, data, doextlpbk))
12777                         etest->flags |= ETH_TEST_FL_FAILED;
12778
12779                 tg3_full_unlock(tp);
12780
12781                 if (tg3_test_interrupt(tp) != 0) {
12782                         etest->flags |= ETH_TEST_FL_FAILED;
12783                         data[TG3_INTERRUPT_TEST] = 1;
12784                 }
12785
12786                 tg3_full_lock(tp, 0);
12787
12788                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12789                 if (netif_running(dev)) {
12790                         tg3_flag_set(tp, INIT_COMPLETE);
12791                         err2 = tg3_restart_hw(tp, 1);
12792                         if (!err2)
12793                                 tg3_netif_start(tp);
12794                 }
12795
12796                 tg3_full_unlock(tp);
12797
12798                 if (irq_sync && !err2)
12799                         tg3_phy_start(tp);
12800         }
12801         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12802                 tg3_power_down(tp);
12803
12804 }
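
/* Editor's note: tg3_self_test() is reached through the ETHTOOL_TEST
 * ioctl.  Minimal userspace sketch (device name "eth0" and the result
 * buffer size are illustrative assumptions; a real tool queries the
 * test count via ETHTOOL_GSSET_INFO first, the way ethtool(8) does):
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <linux/ethtool.h>
 *	#include <linux/sockios.h>
 *
 *	struct {
 *		struct ethtool_test cmd;
 *		__u64 data[8];		// >= number of tg3 sub-tests
 *	} t = { .cmd = { .cmd = ETHTOOL_TEST,
 *			 .flags = ETH_TEST_FL_OFFLINE } };
 *	struct ifreq ifr = { 0 };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&t;
 *	ioctl(fd, SIOCETHTOOL, &ifr);	// fd: AF_INET/SOCK_DGRAM socket
 *	// on return, t.cmd.flags has ETH_TEST_FL_FAILED on any failure
 *	// and t.data[] holds the per-test words filled in above
 */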
12805
12806 static int tg3_hwtstamp_ioctl(struct net_device *dev,
12807                               struct ifreq *ifr, int cmd)
12808 {
12809         struct tg3 *tp = netdev_priv(dev);
12810         struct hwtstamp_config stmpconf;
12811
12812         if (!tg3_flag(tp, PTP_CAPABLE))
12813                 return -EINVAL;
12814
12815         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
12816                 return -EFAULT;
12817
12818         if (stmpconf.flags)
12819                 return -EINVAL;
12820
12821         switch (stmpconf.tx_type) {
12822         case HWTSTAMP_TX_ON:
12823                 tg3_flag_set(tp, TX_TSTAMP_EN);
12824                 break;
12825         case HWTSTAMP_TX_OFF:
12826                 tg3_flag_clear(tp, TX_TSTAMP_EN);
12827                 break;
12828         default:
12829                 return -ERANGE;
12830         }
12831
12832         switch (stmpconf.rx_filter) {
12833         case HWTSTAMP_FILTER_NONE:
12834                 tp->rxptpctl = 0;
12835                 break;
12836         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
12837                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12838                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
12839                 break;
12840         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
12841                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12842                                TG3_RX_PTP_CTL_SYNC_EVNT;
12843                 break;
12844         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
12845                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
12846                                TG3_RX_PTP_CTL_DELAY_REQ;
12847                 break;
12848         case HWTSTAMP_FILTER_PTP_V2_EVENT:
12849                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12850                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12851                 break;
12852         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
12853                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12854                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12855                 break;
12856         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
12857                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12858                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
12859                 break;
12860         case HWTSTAMP_FILTER_PTP_V2_SYNC:
12861                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12862                                TG3_RX_PTP_CTL_SYNC_EVNT;
12863                 break;
12864         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
12865                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12866                                TG3_RX_PTP_CTL_SYNC_EVNT;
12867                 break;
12868         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
12869                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12870                                TG3_RX_PTP_CTL_SYNC_EVNT;
12871                 break;
12872         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
12873                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
12874                                TG3_RX_PTP_CTL_DELAY_REQ;
12875                 break;
12876         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
12877                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
12878                                TG3_RX_PTP_CTL_DELAY_REQ;
12879                 break;
12880         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
12881                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
12882                                TG3_RX_PTP_CTL_DELAY_REQ;
12883                 break;
12884         default:
12885                 return -ERANGE;
12886         }
12887
12888         if (netif_running(dev) && tp->rxptpctl)
12889                 tw32(TG3_RX_PTP_CTL,
12890                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
12891
12892         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
12893                 -EFAULT : 0;
12894 }
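
/* Editor's note: sketch of the matching userspace request (device name
 * "eth0" assumed; error handling omitted).  SIOCSHWTSTAMP delivers the
 * hwtstamp_config to tg3_hwtstamp_ioctl() above, which programs
 * TG3_RX_PTP_CTL and copies the config back so the caller can see what
 * was actually applied:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *
 *	struct hwtstamp_config cfg = { 0 };	// flags must stay zero
 *	struct ifreq ifr = { 0 };
 *
 *	cfg.tx_type = HWTSTAMP_TX_ON;
 *	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (char *)&cfg;
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);	// fd: AF_INET/SOCK_DGRAM socket
 */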
12895
12896 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12897 {
12898         struct mii_ioctl_data *data = if_mii(ifr);
12899         struct tg3 *tp = netdev_priv(dev);
12900         int err;
12901
12902         if (tg3_flag(tp, USE_PHYLIB)) {
12903                 struct phy_device *phydev;
12904                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12905                         return -EAGAIN;
12906                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12907                 return phy_mii_ioctl(phydev, ifr, cmd);
12908         }
12909
12910         switch (cmd) {
12911         case SIOCGMIIPHY:
12912                 data->phy_id = tp->phy_addr;
12913
12914                 /* fallthru */
12915         case SIOCGMIIREG: {
12916                 u32 mii_regval;
12917
12918                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12919                         break;                  /* We have no PHY */
12920
12921                 if (!netif_running(dev))
12922                         return -EAGAIN;
12923
12924                 spin_lock_bh(&tp->lock);
12925                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12926                 spin_unlock_bh(&tp->lock);
12927
12928                 data->val_out = mii_regval;
12929
12930                 return err;
12931         }
12932
12933         case SIOCSMIIREG:
12934                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12935                         break;                  /* We have no PHY */
12936
12937                 if (!netif_running(dev))
12938                         return -EAGAIN;
12939
12940                 spin_lock_bh(&tp->lock);
12941                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12942                 spin_unlock_bh(&tp->lock);
12943
12944                 return err;
12945
12946         case SIOCSHWTSTAMP:
12947                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
12948
12949         default:
12950                 /* do nothing */
12951                 break;
12952         }
12953         return -EOPNOTSUPP;
12954 }
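
/* Editor's note: the SIOCGMIIPHY/SIOCGMIIREG pair above is the classic
 * mii-tool access path.  Userspace sketch (device name "eth0" assumed;
 * struct mii_ioctl_data overlays the ifreq union, which is what the
 * kernel-side if_mii() unpacks):
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <linux/mii.h>
 *	#include <linux/sockios.h>
 *
 *	struct ifreq ifr = { 0 };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_ifru;
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);	// fills mii->phy_id (tp->phy_addr)
 *	mii->reg_num = MII_BMSR;	// 0x01: basic mode status register
 *	ioctl(fd, SIOCGMIIREG, &ifr);	// link status etc. in mii->val_out
 */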
12955
12956 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12957 {
12958         struct tg3 *tp = netdev_priv(dev);
12959
12960         memcpy(ec, &tp->coal, sizeof(*ec));
12961         return 0;
12962 }
12963
12964 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12965 {
12966         struct tg3 *tp = netdev_priv(dev);
12967         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12968         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12969
12970         if (!tg3_flag(tp, 5705_PLUS)) {
12971                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12972                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12973                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12974                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12975         }
12976
12977         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12978             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12979             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12980             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12981             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12982             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12983             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12984             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12985             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12986             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12987                 return -EINVAL;
12988
12989         /* No rx interrupts will be generated if both are zero */
12990         if ((ec->rx_coalesce_usecs == 0) &&
12991             (ec->rx_max_coalesced_frames == 0))
12992                 return -EINVAL;
12993
12994         /* No tx interrupts will be generated if both are zero */
12995         if ((ec->tx_coalesce_usecs == 0) &&
12996             (ec->tx_max_coalesced_frames == 0))
12997                 return -EINVAL;
12998
12999         /* Only copy relevant parameters, ignore all others. */
13000         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13001         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13002         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13003         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13004         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13005         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13006         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13007         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13008         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13009
13010         if (netif_running(dev)) {
13011                 tg3_full_lock(tp, 0);
13012                 __tg3_set_coalesce(tp, &tp->coal);
13013                 tg3_full_unlock(tp);
13014         }
13015         return 0;
13016 }
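
/* Editor's note: the two both-zero checks above reflect the usual
 * coalescing contract -- the hardware raises an interrupt when EITHER
 * the usec timer expires OR the frame count is reached, whichever
 * happens first -- so zeroing both thresholds for a direction would
 * silence its interrupts entirely.  Typical tuning from userspace
 * (values purely illustrative):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5 tx-usecs 72 tx-frames 53
 */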
13017
13018 static const struct ethtool_ops tg3_ethtool_ops = {
13019         .get_settings           = tg3_get_settings,
13020         .set_settings           = tg3_set_settings,
13021         .get_drvinfo            = tg3_get_drvinfo,
13022         .get_regs_len           = tg3_get_regs_len,
13023         .get_regs               = tg3_get_regs,
13024         .get_wol                = tg3_get_wol,
13025         .set_wol                = tg3_set_wol,
13026         .get_msglevel           = tg3_get_msglevel,
13027         .set_msglevel           = tg3_set_msglevel,
13028         .nway_reset             = tg3_nway_reset,
13029         .get_link               = ethtool_op_get_link,
13030         .get_eeprom_len         = tg3_get_eeprom_len,
13031         .get_eeprom             = tg3_get_eeprom,
13032         .set_eeprom             = tg3_set_eeprom,
13033         .get_ringparam          = tg3_get_ringparam,
13034         .set_ringparam          = tg3_set_ringparam,
13035         .get_pauseparam         = tg3_get_pauseparam,
13036         .set_pauseparam         = tg3_set_pauseparam,
13037         .self_test              = tg3_self_test,
13038         .get_strings            = tg3_get_strings,
13039         .set_phys_id            = tg3_set_phys_id,
13040         .get_ethtool_stats      = tg3_get_ethtool_stats,
13041         .get_coalesce           = tg3_get_coalesce,
13042         .set_coalesce           = tg3_set_coalesce,
13043         .get_sset_count         = tg3_get_sset_count,
13044         .get_rxnfc              = tg3_get_rxnfc,
13045         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13046         .get_rxfh_indir         = tg3_get_rxfh_indir,
13047         .set_rxfh_indir         = tg3_set_rxfh_indir,
13048         .get_channels           = tg3_get_channels,
13049         .set_channels           = tg3_set_channels,
13050         .get_ts_info            = tg3_get_ts_info,
13051 };
13052
13053 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13054                                                 struct rtnl_link_stats64 *stats)
13055 {
13056         struct tg3 *tp = netdev_priv(dev);
13057
13058         spin_lock_bh(&tp->lock);
13059         if (!tp->hw_stats) {
13060                 spin_unlock_bh(&tp->lock);
13061                 return &tp->net_stats_prev;
13062         }
13063
13064         tg3_get_nstats(tp, stats);
13065         spin_unlock_bh(&tp->lock);
13066
13067         return stats;
13068 }
13069
13070 static void tg3_set_rx_mode(struct net_device *dev)
13071 {
13072         struct tg3 *tp = netdev_priv(dev);
13073
13074         if (!netif_running(dev))
13075                 return;
13076
13077         tg3_full_lock(tp, 0);
13078         __tg3_set_rx_mode(dev);
13079         tg3_full_unlock(tp);
13080 }
13081
13082 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13083                                int new_mtu)
13084 {
13085         dev->mtu = new_mtu;
13086
13087         if (new_mtu > ETH_DATA_LEN) {
13088                 if (tg3_flag(tp, 5780_CLASS)) {
13089                         netdev_update_features(dev);
13090                         tg3_flag_clear(tp, TSO_CAPABLE);
13091                 } else {
13092                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13093                 }
13094         } else {
13095                 if (tg3_flag(tp, 5780_CLASS)) {
13096                         tg3_flag_set(tp, TSO_CAPABLE);
13097                         netdev_update_features(dev);
13098                 }
13099                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13100         }
13101 }
13102
13103 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13104 {
13105         struct tg3 *tp = netdev_priv(dev);
13106         int err, reset_phy = 0;
13107
13108         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13109                 return -EINVAL;
13110
13111         if (!netif_running(dev)) {
13112                 /* We'll just catch it later when the
13113                  * device is brought up.
13114                  */
13115                 tg3_set_mtu(dev, tp, new_mtu);
13116                 return 0;
13117         }
13118
13119         tg3_phy_stop(tp);
13120
13121         tg3_netif_stop(tp);
13122
13123         tg3_full_lock(tp, 1);
13124
13125         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13126
13127         tg3_set_mtu(dev, tp, new_mtu);
13128
13129         /* Reset the PHY, otherwise the read DMA engine will be left in a
13130          * mode that breaks all requests down to 256 bytes.
13131          */
13132         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13133                 reset_phy = 1;
13134
13135         err = tg3_restart_hw(tp, reset_phy);
13136
13137         if (!err)
13138                 tg3_netif_start(tp);
13139
13140         tg3_full_unlock(tp);
13141
13142         if (!err)
13143                 tg3_phy_start(tp);
13144
13145         return err;
13146 }
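
/* Editor's note: this path runs for any MTU change on a running
 * interface, e.g. (illustrative):
 *
 *	ip link set dev eth0 mtu 9000
 *
 * which lands here via the ndo_change_mtu hook registered below.
 * Crossing ETH_DATA_LEN (1500) flips the jumbo ring on or off in
 * tg3_set_mtu(), which is one reason the device is halted and fully
 * restarted rather than updated in place.
 */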
13147
13148 static const struct net_device_ops tg3_netdev_ops = {
13149         .ndo_open               = tg3_open,
13150         .ndo_stop               = tg3_close,
13151         .ndo_start_xmit         = tg3_start_xmit,
13152         .ndo_get_stats64        = tg3_get_stats64,
13153         .ndo_validate_addr      = eth_validate_addr,
13154         .ndo_set_rx_mode        = tg3_set_rx_mode,
13155         .ndo_set_mac_address    = tg3_set_mac_addr,
13156         .ndo_do_ioctl           = tg3_ioctl,
13157         .ndo_tx_timeout         = tg3_tx_timeout,
13158         .ndo_change_mtu         = tg3_change_mtu,
13159         .ndo_fix_features       = tg3_fix_features,
13160         .ndo_set_features       = tg3_set_features,
13161 #ifdef CONFIG_NET_POLL_CONTROLLER
13162         .ndo_poll_controller    = tg3_poll_controller,
13163 #endif
13164 };
13165
13166 static void tg3_get_eeprom_size(struct tg3 *tp)
13167 {
13168         u32 cursize, val, magic;
13169
13170         tp->nvram_size = EEPROM_CHIP_SIZE;
13171
13172         if (tg3_nvram_read(tp, 0, &magic) != 0)
13173                 return;
13174
13175         if ((magic != TG3_EEPROM_MAGIC) &&
13176             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13177             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13178                 return;
13179
13180         /*
13181          * Size the chip by reading offsets at increasing powers of two.
13182          * When we encounter our validation signature, we know the addressing
13183          * has wrapped around, and thus have our chip size.
13184          */
13185         cursize = 0x10;
13186
13187         while (cursize < tp->nvram_size) {
13188                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13189                         return;
13190
13191                 if (val == magic)
13192                         break;
13193
13194                 cursize <<= 1;
13195         }
13196
13197         tp->nvram_size = cursize;
13198 }
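
/* Editor's note: worked example of the wraparound probe above, for a
 * hypothetical 1 KB part with its magic signature at offset 0.  Address
 * bits above the real chip size are ignored by the part, so the first
 * power-of-two offset at or beyond the chip size aliases offset 0 and
 * reads back the magic:
 *
 *	read 0x010 -> data            read 0x200 -> data
 *	read 0x020 -> data            read 0x400 -> magic (wrapped!)
 *	...                           => nvram_size = 0x400 (1 KB)
 */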
13199
13200 static void tg3_get_nvram_size(struct tg3 *tp)
13201 {
13202         u32 val;
13203
13204         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13205                 return;
13206
13207         /* Selfboot format */
13208         if (val != TG3_EEPROM_MAGIC) {
13209                 tg3_get_eeprom_size(tp);
13210                 return;
13211         }
13212
13213         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13214                 if (val != 0) {
13215                         /* This is confusing.  We want to operate on the
13216                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13217                          * call will read from NVRAM and byteswap the data
13218                          * according to the byteswapping settings for all
13219                          * other register accesses.  This ensures the data we
13220                          * want will always reside in the lower 16-bits.
13221                          * However, the data in NVRAM is in LE format, which
13222                          * means the data from the NVRAM read will always be
13223                          * opposite the endianness of the CPU.  The 16-bit
13224                          * byteswap then brings the data to CPU endianness.
13225                          */
13226                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13227                         return;
13228                 }
13229         }
13230         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13231 }
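
/* Editor's note: a concrete instance of the byteswap above.  Suppose
 * the halfword at NVRAM offset 0xf2 encodes 512 (0x0200, size in KB).
 * Per the comment, it reaches the CPU in the low 16 bits but
 * byte-reversed relative to CPU endianness, i.e. as 0x0002:
 *
 *	swab16(0x0002) == 0x0200
 *	0x0200 * 1024  == 0x80000 bytes (512 KB)
 */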
13232
13233 static void tg3_get_nvram_info(struct tg3 *tp)
13234 {
13235         u32 nvcfg1;
13236
13237         nvcfg1 = tr32(NVRAM_CFG1);
13238         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13239                 tg3_flag_set(tp, FLASH);
13240         } else {
13241                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13242                 tw32(NVRAM_CFG1, nvcfg1);
13243         }
13244
13245         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13246             tg3_flag(tp, 5780_CLASS)) {
13247                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13248                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13249                         tp->nvram_jedecnum = JEDEC_ATMEL;
13250                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13251                         tg3_flag_set(tp, NVRAM_BUFFERED);
13252                         break;
13253                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13254                         tp->nvram_jedecnum = JEDEC_ATMEL;
13255                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13256                         break;
13257                 case FLASH_VENDOR_ATMEL_EEPROM:
13258                         tp->nvram_jedecnum = JEDEC_ATMEL;
13259                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13260                         tg3_flag_set(tp, NVRAM_BUFFERED);
13261                         break;
13262                 case FLASH_VENDOR_ST:
13263                         tp->nvram_jedecnum = JEDEC_ST;
13264                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13265                         tg3_flag_set(tp, NVRAM_BUFFERED);
13266                         break;
13267                 case FLASH_VENDOR_SAIFUN:
13268                         tp->nvram_jedecnum = JEDEC_SAIFUN;
13269                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13270                         break;
13271                 case FLASH_VENDOR_SST_SMALL:
13272                 case FLASH_VENDOR_SST_LARGE:
13273                         tp->nvram_jedecnum = JEDEC_SST;
13274                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13275                         break;
13276                 }
13277         } else {
13278                 tp->nvram_jedecnum = JEDEC_ATMEL;
13279                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13280                 tg3_flag_set(tp, NVRAM_BUFFERED);
13281         }
13282 }
13283
13284 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13285 {
13286         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13287         case FLASH_5752PAGE_SIZE_256:
13288                 tp->nvram_pagesize = 256;
13289                 break;
13290         case FLASH_5752PAGE_SIZE_512:
13291                 tp->nvram_pagesize = 512;
13292                 break;
13293         case FLASH_5752PAGE_SIZE_1K:
13294                 tp->nvram_pagesize = 1024;
13295                 break;
13296         case FLASH_5752PAGE_SIZE_2K:
13297                 tp->nvram_pagesize = 2048;
13298                 break;
13299         case FLASH_5752PAGE_SIZE_4K:
13300                 tp->nvram_pagesize = 4096;
13301                 break;
13302         case FLASH_5752PAGE_SIZE_264:
13303                 tp->nvram_pagesize = 264;
13304                 break;
13305         case FLASH_5752PAGE_SIZE_528:
13306                 tp->nvram_pagesize = 528;
13307                 break;
13308         }
13309 }
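
/* Editor's note (assumption flagged): the odd 264- and 528-byte cases
 * above match Atmel AT45DB-family DataFlash parts, whose native pages
 * carry 8 or 16 spare bytes on top of 256/512 data bytes.  This is
 * also why code elsewhere in this file tests for exactly these two
 * sizes before setting NO_NVRAM_ADDR_TRANS: such parts use page+offset
 * addressing instead of a flat byte address.
 */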
13310
13311 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13312 {
13313         u32 nvcfg1;
13314
13315         nvcfg1 = tr32(NVRAM_CFG1);
13316
13317         /* NVRAM protection for TPM */
13318         if (nvcfg1 & (1 << 27))
13319                 tg3_flag_set(tp, PROTECTED_NVRAM);
13320
13321         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13322         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13323         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13324                 tp->nvram_jedecnum = JEDEC_ATMEL;
13325                 tg3_flag_set(tp, NVRAM_BUFFERED);
13326                 break;
13327         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13328                 tp->nvram_jedecnum = JEDEC_ATMEL;
13329                 tg3_flag_set(tp, NVRAM_BUFFERED);
13330                 tg3_flag_set(tp, FLASH);
13331                 break;
13332         case FLASH_5752VENDOR_ST_M45PE10:
13333         case FLASH_5752VENDOR_ST_M45PE20:
13334         case FLASH_5752VENDOR_ST_M45PE40:
13335                 tp->nvram_jedecnum = JEDEC_ST;
13336                 tg3_flag_set(tp, NVRAM_BUFFERED);
13337                 tg3_flag_set(tp, FLASH);
13338                 break;
13339         }
13340
13341         if (tg3_flag(tp, FLASH)) {
13342                 tg3_nvram_get_pagesize(tp, nvcfg1);
13343         } else {
13344                 /* For EEPROMs, set the pagesize to the maximum EEPROM size */
13345                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13346
13347                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13348                 tw32(NVRAM_CFG1, nvcfg1);
13349         }
13350 }
13351
13352 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13353 {
13354         u32 nvcfg1, protect = 0;
13355
13356         nvcfg1 = tr32(NVRAM_CFG1);
13357
13358         /* NVRAM protection for TPM */
13359         if (nvcfg1 & (1 << 27)) {
13360                 tg3_flag_set(tp, PROTECTED_NVRAM);
13361                 protect = 1;
13362         }
13363
13364         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13365         switch (nvcfg1) {
13366         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13367         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13368         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13369         case FLASH_5755VENDOR_ATMEL_FLASH_5:
13370                 tp->nvram_jedecnum = JEDEC_ATMEL;
13371                 tg3_flag_set(tp, NVRAM_BUFFERED);
13372                 tg3_flag_set(tp, FLASH);
13373                 tp->nvram_pagesize = 264;
13374                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13375                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13376                         tp->nvram_size = (protect ? 0x3e200 :
13377                                           TG3_NVRAM_SIZE_512KB);
13378                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13379                         tp->nvram_size = (protect ? 0x1f200 :
13380                                           TG3_NVRAM_SIZE_256KB);
13381                 else
13382                         tp->nvram_size = (protect ? 0x1f200 :
13383                                           TG3_NVRAM_SIZE_128KB);
13384                 break;
13385         case FLASH_5752VENDOR_ST_M45PE10:
13386         case FLASH_5752VENDOR_ST_M45PE20:
13387         case FLASH_5752VENDOR_ST_M45PE40:
13388                 tp->nvram_jedecnum = JEDEC_ST;
13389                 tg3_flag_set(tp, NVRAM_BUFFERED);
13390                 tg3_flag_set(tp, FLASH);
13391                 tp->nvram_pagesize = 256;
13392                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13393                         tp->nvram_size = (protect ?
13394                                           TG3_NVRAM_SIZE_64KB :
13395                                           TG3_NVRAM_SIZE_128KB);
13396                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13397                         tp->nvram_size = (protect ?
13398                                           TG3_NVRAM_SIZE_64KB :
13399                                           TG3_NVRAM_SIZE_256KB);
13400                 else
13401                         tp->nvram_size = (protect ?
13402                                           TG3_NVRAM_SIZE_128KB :
13403                                           TG3_NVRAM_SIZE_512KB);
13404                 break;
13405         }
13406 }
13407
13408 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13409 {
13410         u32 nvcfg1;
13411
13412         nvcfg1 = tr32(NVRAM_CFG1);
13413
13414         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13415         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13416         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13417         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13418         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13419                 tp->nvram_jedecnum = JEDEC_ATMEL;
13420                 tg3_flag_set(tp, NVRAM_BUFFERED);
13421                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13422
13423                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13424                 tw32(NVRAM_CFG1, nvcfg1);
13425                 break;
13426         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13427         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13428         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13429         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13430                 tp->nvram_jedecnum = JEDEC_ATMEL;
13431                 tg3_flag_set(tp, NVRAM_BUFFERED);
13432                 tg3_flag_set(tp, FLASH);
13433                 tp->nvram_pagesize = 264;
13434                 break;
13435         case FLASH_5752VENDOR_ST_M45PE10:
13436         case FLASH_5752VENDOR_ST_M45PE20:
13437         case FLASH_5752VENDOR_ST_M45PE40:
13438                 tp->nvram_jedecnum = JEDEC_ST;
13439                 tg3_flag_set(tp, NVRAM_BUFFERED);
13440                 tg3_flag_set(tp, FLASH);
13441                 tp->nvram_pagesize = 256;
13442                 break;
13443         }
13444 }
13445
13446 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13447 {
13448         u32 nvcfg1, protect = 0;
13449
13450         nvcfg1 = tr32(NVRAM_CFG1);
13451
13452         /* NVRAM protection for TPM */
13453         if (nvcfg1 & (1 << 27)) {
13454                 tg3_flag_set(tp, PROTECTED_NVRAM);
13455                 protect = 1;
13456         }
13457
13458         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13459         switch (nvcfg1) {
13460         case FLASH_5761VENDOR_ATMEL_ADB021D:
13461         case FLASH_5761VENDOR_ATMEL_ADB041D:
13462         case FLASH_5761VENDOR_ATMEL_ADB081D:
13463         case FLASH_5761VENDOR_ATMEL_ADB161D:
13464         case FLASH_5761VENDOR_ATMEL_MDB021D:
13465         case FLASH_5761VENDOR_ATMEL_MDB041D:
13466         case FLASH_5761VENDOR_ATMEL_MDB081D:
13467         case FLASH_5761VENDOR_ATMEL_MDB161D:
13468                 tp->nvram_jedecnum = JEDEC_ATMEL;
13469                 tg3_flag_set(tp, NVRAM_BUFFERED);
13470                 tg3_flag_set(tp, FLASH);
13471                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13472                 tp->nvram_pagesize = 256;
13473                 break;
13474         case FLASH_5761VENDOR_ST_A_M45PE20:
13475         case FLASH_5761VENDOR_ST_A_M45PE40:
13476         case FLASH_5761VENDOR_ST_A_M45PE80:
13477         case FLASH_5761VENDOR_ST_A_M45PE16:
13478         case FLASH_5761VENDOR_ST_M_M45PE20:
13479         case FLASH_5761VENDOR_ST_M_M45PE40:
13480         case FLASH_5761VENDOR_ST_M_M45PE80:
13481         case FLASH_5761VENDOR_ST_M_M45PE16:
13482                 tp->nvram_jedecnum = JEDEC_ST;
13483                 tg3_flag_set(tp, NVRAM_BUFFERED);
13484                 tg3_flag_set(tp, FLASH);
13485                 tp->nvram_pagesize = 256;
13486                 break;
13487         }
13488
13489         if (protect) {
13490                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13491         } else {
13492                 switch (nvcfg1) {
13493                 case FLASH_5761VENDOR_ATMEL_ADB161D:
13494                 case FLASH_5761VENDOR_ATMEL_MDB161D:
13495                 case FLASH_5761VENDOR_ST_A_M45PE16:
13496                 case FLASH_5761VENDOR_ST_M_M45PE16:
13497                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13498                         break;
13499                 case FLASH_5761VENDOR_ATMEL_ADB081D:
13500                 case FLASH_5761VENDOR_ATMEL_MDB081D:
13501                 case FLASH_5761VENDOR_ST_A_M45PE80:
13502                 case FLASH_5761VENDOR_ST_M_M45PE80:
13503                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13504                         break;
13505                 case FLASH_5761VENDOR_ATMEL_ADB041D:
13506                 case FLASH_5761VENDOR_ATMEL_MDB041D:
13507                 case FLASH_5761VENDOR_ST_A_M45PE40:
13508                 case FLASH_5761VENDOR_ST_M_M45PE40:
13509                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13510                         break;
13511                 case FLASH_5761VENDOR_ATMEL_ADB021D:
13512                 case FLASH_5761VENDOR_ATMEL_MDB021D:
13513                 case FLASH_5761VENDOR_ST_A_M45PE20:
13514                 case FLASH_5761VENDOR_ST_M_M45PE20:
13515                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13516                         break;
13517                 }
13518         }
13519 }
13520
13521 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13522 {
13523         tp->nvram_jedecnum = JEDEC_ATMEL;
13524         tg3_flag_set(tp, NVRAM_BUFFERED);
13525         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13526 }
13527
13528 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13529 {
13530         u32 nvcfg1;
13531
13532         nvcfg1 = tr32(NVRAM_CFG1);
13533
13534         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13535         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13536         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13537                 tp->nvram_jedecnum = JEDEC_ATMEL;
13538                 tg3_flag_set(tp, NVRAM_BUFFERED);
13539                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13540
13541                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13542                 tw32(NVRAM_CFG1, nvcfg1);
13543                 return;
13544         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13545         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13546         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13547         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13548         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13549         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13550         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13551                 tp->nvram_jedecnum = JEDEC_ATMEL;
13552                 tg3_flag_set(tp, NVRAM_BUFFERED);
13553                 tg3_flag_set(tp, FLASH);
13554
13555                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13556                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13557                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13558                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13559                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13560                         break;
13561                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13562                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13563                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13564                         break;
13565                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13566                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13567                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13568                         break;
13569                 }
13570                 break;
13571         case FLASH_5752VENDOR_ST_M45PE10:
13572         case FLASH_5752VENDOR_ST_M45PE20:
13573         case FLASH_5752VENDOR_ST_M45PE40:
13574                 tp->nvram_jedecnum = JEDEC_ST;
13575                 tg3_flag_set(tp, NVRAM_BUFFERED);
13576                 tg3_flag_set(tp, FLASH);
13577
13578                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13579                 case FLASH_5752VENDOR_ST_M45PE10:
13580                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13581                         break;
13582                 case FLASH_5752VENDOR_ST_M45PE20:
13583                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13584                         break;
13585                 case FLASH_5752VENDOR_ST_M45PE40:
13586                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13587                         break;
13588                 }
13589                 break;
13590         default:
13591                 tg3_flag_set(tp, NO_NVRAM);
13592                 return;
13593         }
13594
13595         tg3_nvram_get_pagesize(tp, nvcfg1);
13596         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13597                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13598 }
13599
13600
13601 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13602 {
13603         u32 nvcfg1;
13604
13605         nvcfg1 = tr32(NVRAM_CFG1);
13606
13607         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13608         case FLASH_5717VENDOR_ATMEL_EEPROM:
13609         case FLASH_5717VENDOR_MICRO_EEPROM:
13610                 tp->nvram_jedecnum = JEDEC_ATMEL;
13611                 tg3_flag_set(tp, NVRAM_BUFFERED);
13612                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13613
13614                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13615                 tw32(NVRAM_CFG1, nvcfg1);
13616                 return;
13617         case FLASH_5717VENDOR_ATMEL_MDB011D:
13618         case FLASH_5717VENDOR_ATMEL_ADB011B:
13619         case FLASH_5717VENDOR_ATMEL_ADB011D:
13620         case FLASH_5717VENDOR_ATMEL_MDB021D:
13621         case FLASH_5717VENDOR_ATMEL_ADB021B:
13622         case FLASH_5717VENDOR_ATMEL_ADB021D:
13623         case FLASH_5717VENDOR_ATMEL_45USPT:
13624                 tp->nvram_jedecnum = JEDEC_ATMEL;
13625                 tg3_flag_set(tp, NVRAM_BUFFERED);
13626                 tg3_flag_set(tp, FLASH);
13627
13628                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13629                 case FLASH_5717VENDOR_ATMEL_MDB021D:
13630                         /* Detect size with tg3_get_nvram_size() */
13631                         break;
13632                 case FLASH_5717VENDOR_ATMEL_ADB021B:
13633                 case FLASH_5717VENDOR_ATMEL_ADB021D:
13634                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13635                         break;
13636                 default:
13637                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13638                         break;
13639                 }
13640                 break;
13641         case FLASH_5717VENDOR_ST_M_M25PE10:
13642         case FLASH_5717VENDOR_ST_A_M25PE10:
13643         case FLASH_5717VENDOR_ST_M_M45PE10:
13644         case FLASH_5717VENDOR_ST_A_M45PE10:
13645         case FLASH_5717VENDOR_ST_M_M25PE20:
13646         case FLASH_5717VENDOR_ST_A_M25PE20:
13647         case FLASH_5717VENDOR_ST_M_M45PE20:
13648         case FLASH_5717VENDOR_ST_A_M45PE20:
13649         case FLASH_5717VENDOR_ST_25USPT:
13650         case FLASH_5717VENDOR_ST_45USPT:
13651                 tp->nvram_jedecnum = JEDEC_ST;
13652                 tg3_flag_set(tp, NVRAM_BUFFERED);
13653                 tg3_flag_set(tp, FLASH);
13654
13655                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13656                 case FLASH_5717VENDOR_ST_M_M25PE20:
13657                 case FLASH_5717VENDOR_ST_M_M45PE20:
13658                         /* Detect size with tg3_get_nvram_size() */
13659                         break;
13660                 case FLASH_5717VENDOR_ST_A_M25PE20:
13661                 case FLASH_5717VENDOR_ST_A_M45PE20:
13662                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13663                         break;
13664                 default:
13665                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13666                         break;
13667                 }
13668                 break;
13669         default:
13670                 tg3_flag_set(tp, NO_NVRAM);
13671                 return;
13672         }
13673
13674         tg3_nvram_get_pagesize(tp, nvcfg1);
13675         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13676                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13677 }
13678
13679 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13680 {
13681         u32 nvcfg1, nvmpinstrp;
13682
13683         nvcfg1 = tr32(NVRAM_CFG1);
13684         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13685
13686         switch (nvmpinstrp) {
13687         case FLASH_5720_EEPROM_HD:
13688         case FLASH_5720_EEPROM_LD:
13689                 tp->nvram_jedecnum = JEDEC_ATMEL;
13690                 tg3_flag_set(tp, NVRAM_BUFFERED);
13691
13692                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13693                 tw32(NVRAM_CFG1, nvcfg1);
13694                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13695                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13696                 else
13697                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13698                 return;
13699         case FLASH_5720VENDOR_M_ATMEL_DB011D:
13700         case FLASH_5720VENDOR_A_ATMEL_DB011B:
13701         case FLASH_5720VENDOR_A_ATMEL_DB011D:
13702         case FLASH_5720VENDOR_M_ATMEL_DB021D:
13703         case FLASH_5720VENDOR_A_ATMEL_DB021B:
13704         case FLASH_5720VENDOR_A_ATMEL_DB021D:
13705         case FLASH_5720VENDOR_M_ATMEL_DB041D:
13706         case FLASH_5720VENDOR_A_ATMEL_DB041B:
13707         case FLASH_5720VENDOR_A_ATMEL_DB041D:
13708         case FLASH_5720VENDOR_M_ATMEL_DB081D:
13709         case FLASH_5720VENDOR_A_ATMEL_DB081D:
13710         case FLASH_5720VENDOR_ATMEL_45USPT:
13711                 tp->nvram_jedecnum = JEDEC_ATMEL;
13712                 tg3_flag_set(tp, NVRAM_BUFFERED);
13713                 tg3_flag_set(tp, FLASH);
13714
13715                 switch (nvmpinstrp) {
13716                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13717                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13718                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13719                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13720                         break;
13721                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13722                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13723                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13724                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13725                         break;
13726                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13727                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13728                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13729                         break;
13730                 default:
13731                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13732                         break;
13733                 }
13734                 break;
13735         case FLASH_5720VENDOR_M_ST_M25PE10:
13736         case FLASH_5720VENDOR_M_ST_M45PE10:
13737         case FLASH_5720VENDOR_A_ST_M25PE10:
13738         case FLASH_5720VENDOR_A_ST_M45PE10:
13739         case FLASH_5720VENDOR_M_ST_M25PE20:
13740         case FLASH_5720VENDOR_M_ST_M45PE20:
13741         case FLASH_5720VENDOR_A_ST_M25PE20:
13742         case FLASH_5720VENDOR_A_ST_M45PE20:
13743         case FLASH_5720VENDOR_M_ST_M25PE40:
13744         case FLASH_5720VENDOR_M_ST_M45PE40:
13745         case FLASH_5720VENDOR_A_ST_M25PE40:
13746         case FLASH_5720VENDOR_A_ST_M45PE40:
13747         case FLASH_5720VENDOR_M_ST_M25PE80:
13748         case FLASH_5720VENDOR_M_ST_M45PE80:
13749         case FLASH_5720VENDOR_A_ST_M25PE80:
13750         case FLASH_5720VENDOR_A_ST_M45PE80:
13751         case FLASH_5720VENDOR_ST_25USPT:
13752         case FLASH_5720VENDOR_ST_45USPT:
13753                 tp->nvram_jedecnum = JEDEC_ST;
13754                 tg3_flag_set(tp, NVRAM_BUFFERED);
13755                 tg3_flag_set(tp, FLASH);
13756
13757                 switch (nvmpinstrp) {
13758                 case FLASH_5720VENDOR_M_ST_M25PE20:
13759                 case FLASH_5720VENDOR_M_ST_M45PE20:
13760                 case FLASH_5720VENDOR_A_ST_M25PE20:
13761                 case FLASH_5720VENDOR_A_ST_M45PE20:
13762                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13763                         break;
13764                 case FLASH_5720VENDOR_M_ST_M25PE40:
13765                 case FLASH_5720VENDOR_M_ST_M45PE40:
13766                 case FLASH_5720VENDOR_A_ST_M25PE40:
13767                 case FLASH_5720VENDOR_A_ST_M45PE40:
13768                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13769                         break;
13770                 case FLASH_5720VENDOR_M_ST_M25PE80:
13771                 case FLASH_5720VENDOR_M_ST_M45PE80:
13772                 case FLASH_5720VENDOR_A_ST_M25PE80:
13773                 case FLASH_5720VENDOR_A_ST_M45PE80:
13774                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13775                         break;
13776                 default:
13777                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13778                         break;
13779                 }
13780                 break;
13781         default:
13782                 tg3_flag_set(tp, NO_NVRAM);
13783                 return;
13784         }
13785
13786         tg3_nvram_get_pagesize(tp, nvcfg1);
13787         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13788                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13789 }
13790
13791 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13792 static void tg3_nvram_init(struct tg3 *tp)
13793 {
13794         tw32_f(GRC_EEPROM_ADDR,
13795              (EEPROM_ADDR_FSM_RESET |
13796               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13797                EEPROM_ADDR_CLKPERD_SHIFT)));
13798
13799         msleep(1);
13800
13801         /* Enable seeprom accesses. */
13802         tw32_f(GRC_LOCAL_CTRL,
13803              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13804         udelay(100);
13805
13806         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13807             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13808                 tg3_flag_set(tp, NVRAM);
13809
13810                 if (tg3_nvram_lock(tp)) {
13811                         netdev_warn(tp->dev,
13812                                     "Cannot get nvram lock, %s failed\n",
13813                                     __func__);
13814                         return;
13815                 }
13816                 tg3_enable_nvram_access(tp);
13817
13818                 tp->nvram_size = 0;
13819
13820                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13821                         tg3_get_5752_nvram_info(tp);
13822                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13823                         tg3_get_5755_nvram_info(tp);
13824                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13825                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13826                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13827                         tg3_get_5787_nvram_info(tp);
13828                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13829                         tg3_get_5761_nvram_info(tp);
13830                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13831                         tg3_get_5906_nvram_info(tp);
13832                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13833                          tg3_flag(tp, 57765_CLASS))
13834                         tg3_get_57780_nvram_info(tp);
13835                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13836                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13837                         tg3_get_5717_nvram_info(tp);
13838                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13839                         tg3_get_5720_nvram_info(tp);
13840                 else
13841                         tg3_get_nvram_info(tp);
13842
13843                 if (tp->nvram_size == 0)
13844                         tg3_get_nvram_size(tp);
13845
13846                 tg3_disable_nvram_access(tp);
13847                 tg3_nvram_unlock(tp);
13848
13849         } else {
13850                 tg3_flag_clear(tp, NVRAM);
13851                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13852
13853                 tg3_get_eeprom_size(tp);
13854         }
13855 }
13856
13857 struct subsys_tbl_ent {
13858         u16 subsys_vendor, subsys_devid;
13859         u32 phy_id;
13860 };
13861
13862 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
13863         /* Broadcom boards. */
13864         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13865           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13866         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13867           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13868         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13869           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13870         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13871           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13872         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13873           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13874         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13875           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13876         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13877           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13878         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13879           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13880         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13881           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13882         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13883           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13884         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13885           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13886
13887         /* 3com boards. */
13888         { TG3PCI_SUBVENDOR_ID_3COM,
13889           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13890         { TG3PCI_SUBVENDOR_ID_3COM,
13891           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13892         { TG3PCI_SUBVENDOR_ID_3COM,
13893           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13894         { TG3PCI_SUBVENDOR_ID_3COM,
13895           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13896         { TG3PCI_SUBVENDOR_ID_3COM,
13897           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13898
13899         /* DELL boards. */
13900         { TG3PCI_SUBVENDOR_ID_DELL,
13901           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13902         { TG3PCI_SUBVENDOR_ID_DELL,
13903           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13904         { TG3PCI_SUBVENDOR_ID_DELL,
13905           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13906         { TG3PCI_SUBVENDOR_ID_DELL,
13907           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13908
13909         /* Compaq boards. */
13910         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13911           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13912         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13913           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13914         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13915           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13916         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13917           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13918         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13919           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13920
13921         /* IBM boards. */
13922         { TG3PCI_SUBVENDOR_ID_IBM,
13923           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13924 };
13925
13926 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
13927 {
13928         int i;
13929
13930         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13931                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13932                      tp->pdev->subsystem_vendor) &&
13933                     (subsys_id_to_phy_id[i].subsys_devid ==
13934                      tp->pdev->subsystem_device))
13935                         return &subsys_id_to_phy_id[i];
13936         }
13937         return NULL;
13938 }
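
/* Editor's note (illustrative; the real caller is the PHY probe code
 * later in the file): table entries with a phy_id of 0 mark
 * fiber/serdes boards with no MII PHY, so a caller must treat zero as
 * "no copper PHY" rather than as a valid ID.  Sketch of the intended
 * use:
 *
 *	struct subsys_tbl_ent *p = tg3_lookup_by_subsys(tp);
 *	if (p) {
 *		tp->phy_id = p->phy_id;
 *		if (!tp->phy_id)
 *			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
 *	}
 */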
13939
13940 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13941 {
13942         u32 val;
13943
13944         tp->phy_id = TG3_PHY_ID_INVALID;
13945         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13946
13947         /* Assume an onboard, WOL-capable device by default.  */
13948         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13949         tg3_flag_set(tp, WOL_CAP);
13950
13951         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13952                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13953                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13954                         tg3_flag_set(tp, IS_NIC);
13955                 }
13956                 val = tr32(VCPU_CFGSHDW);
13957                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13958                         tg3_flag_set(tp, ASPM_WORKAROUND);
13959                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13960                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13961                         tg3_flag_set(tp, WOL_ENABLE);
13962                         device_set_wakeup_enable(&tp->pdev->dev, true);
13963                 }
13964                 goto done;
13965         }
13966
13967         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13968         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13969                 u32 nic_cfg, led_cfg;
13970                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13971                 int eeprom_phy_serdes = 0;
13972
13973                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13974                 tp->nic_sram_data_cfg = nic_cfg;
13975
13976                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13977                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13978                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13979                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13980                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13981                     (ver > 0) && (ver < 0x100))
13982                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13983
13984                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13985                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13986
13987                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13988                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13989                         eeprom_phy_serdes = 1;
13990
13991                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13992                 if (nic_phy_id != 0) {
13993                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13994                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13995
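                              /* Repack the OUI, model and revision bits from
                               * the two MII-style ID words into the driver's
                               * internal TG3_PHY_ID_* format: OUI bits in the
                               * upper bits, model and revision in the low
                               * ten bits.
                               */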
13996                         eeprom_phy_id  = (id1 >> 16) << 10;
13997                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13998                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13999                 } else
14000                         eeprom_phy_id = 0;
14001
14002                 tp->phy_id = eeprom_phy_id;
14003                 if (eeprom_phy_serdes) {
14004                         if (!tg3_flag(tp, 5705_PLUS))
14005                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14006                         else
14007                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14008                 }
14009
14010                 if (tg3_flag(tp, 5750_PLUS))
14011                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14012                                     SHASTA_EXT_LED_MODE_MASK);
14013                 else
14014                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14015
14016                 switch (led_cfg) {
14017                 default:
14018                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14019                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14020                         break;
14021
14022                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14023                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14024                         break;
14025
14026                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14027                         tp->led_ctrl = LED_CTRL_MODE_MAC;
14028
14029                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14030                          * read from some older 5700/5701 bootcode.
14031                          */
14032                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14033                             ASIC_REV_5700 ||
14034                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
14035                             ASIC_REV_5701)
14036                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14037
14038                         break;
14039
14040                 case SHASTA_EXT_LED_SHARED:
14041                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
14042                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
14043                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
14044                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14045                                                  LED_CTRL_MODE_PHY_2);
14046                         break;
14047
14048                 case SHASTA_EXT_LED_MAC:
14049                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14050                         break;
14051
14052                 case SHASTA_EXT_LED_COMBO:
14053                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
14054                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
14055                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14056                                                  LED_CTRL_MODE_PHY_2);
14057                         break;
14058
14059                 }
14060
14061                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14062                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
14063                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14064                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14065
14066                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
14067                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14068
14069                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14070                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
14071                         if ((tp->pdev->subsystem_vendor ==
14072                              PCI_VENDOR_ID_ARIMA) &&
14073                             (tp->pdev->subsystem_device == 0x205a ||
14074                              tp->pdev->subsystem_device == 0x2063))
14075                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14076                 } else {
14077                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14078                         tg3_flag_set(tp, IS_NIC);
14079                 }
14080
14081                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14082                         tg3_flag_set(tp, ENABLE_ASF);
14083                         if (tg3_flag(tp, 5750_PLUS))
14084                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14085                 }
14086
14087                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14088                     tg3_flag(tp, 5750_PLUS))
14089                         tg3_flag_set(tp, ENABLE_APE);
14090
14091                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14092                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14093                         tg3_flag_clear(tp, WOL_CAP);
14094
14095                 if (tg3_flag(tp, WOL_CAP) &&
14096                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14097                         tg3_flag_set(tp, WOL_ENABLE);
14098                         device_set_wakeup_enable(&tp->pdev->dev, true);
14099                 }
14100
14101                 if (cfg2 & (1 << 17))
14102                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14103
14104                 /* SerDes signal pre-emphasis in register 0x590 is set
14105                  * by the bootcode if bit 18 is set. */
14106                 if (cfg2 & (1 << 18))
14107                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14108
14109                 if ((tg3_flag(tp, 57765_PLUS) ||
14110                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14111                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
14112                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14113                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14114
14115                 if (tg3_flag(tp, PCI_EXPRESS) &&
14116                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14117                     !tg3_flag(tp, 57765_PLUS)) {
14118                         u32 cfg3;
14119
14120                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14121                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
14122                                 tg3_flag_set(tp, ASPM_WORKAROUND);
14123                 }
14124
14125                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14126                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14127                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14128                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14129                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14130                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14131         }
14132 done:
14133         if (tg3_flag(tp, WOL_CAP))
14134                 device_set_wakeup_enable(&tp->pdev->dev,
14135                                          tg3_flag(tp, WOL_ENABLE));
14136         else
14137                 device_set_wakeup_capable(&tp->pdev->dev, false);
14138 }
14139
14140 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14141 {
14142         int i;
14143         u32 val;
14144
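              /* Pulse the START bit to kick off the OTP command. */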
14145         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14146         tw32(OTP_CTRL, cmd);
14147
14148         /* Wait for up to 1 ms for command to execute. */
14149         for (i = 0; i < 100; i++) {
14150                 val = tr32(OTP_STATUS);
14151                 if (val & OTP_STATUS_CMD_DONE)
14152                         break;
14153                 udelay(10);
14154         }
14155
14156         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14157 }
14158
14159 /* Read the gphy configuration from the OTP region of the chip.  The gphy
14160  * configuration is a 32-bit value that straddles the alignment boundary.
14161  * We do two 32-bit reads and then shift and merge the results.
14162  */
14163 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14164 {
14165         u32 bhalf_otp, thalf_otp;
14166
14167         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14168
14169         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14170                 return 0;
14171
14172         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14173
14174         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14175                 return 0;
14176
14177         thalf_otp = tr32(OTP_READ_DATA);
14178
14179         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14180
14181         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14182                 return 0;
14183
14184         bhalf_otp = tr32(OTP_READ_DATA);
14185
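              /* The 32-bit gphy config straddles the two words read above:
               * the low half of the first word supplies the upper 16 bits
               * and the high half of the second word the lower 16 bits.
               */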
14186         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14187 }
14188
14189 static void tg3_phy_init_link_config(struct tg3 *tp)
14190 {
14191         u32 adv = ADVERTISED_Autoneg;
14192
14193         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14194                 adv |= ADVERTISED_1000baseT_Half |
14195                        ADVERTISED_1000baseT_Full;
14196
14197         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14198                 adv |= ADVERTISED_100baseT_Half |
14199                        ADVERTISED_100baseT_Full |
14200                        ADVERTISED_10baseT_Half |
14201                        ADVERTISED_10baseT_Full |
14202                        ADVERTISED_TP;
14203         else
14204                 adv |= ADVERTISED_FIBRE;
14205
14206         tp->link_config.advertising = adv;
14207         tp->link_config.speed = SPEED_UNKNOWN;
14208         tp->link_config.duplex = DUPLEX_UNKNOWN;
14209         tp->link_config.autoneg = AUTONEG_ENABLE;
14210         tp->link_config.active_speed = SPEED_UNKNOWN;
14211         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14212
14213         tp->old_link = -1;
14214 }
14215
14216 static int tg3_phy_probe(struct tg3 *tp)
14217 {
14218         u32 hw_phy_id_1, hw_phy_id_2;
14219         u32 hw_phy_id, hw_phy_id_masked;
14220         int err;
14221
14222         /* flow control autonegotiation is default behavior */
14223         tg3_flag_set(tp, PAUSE_AUTONEG);
14224         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14225
14226         if (tg3_flag(tp, ENABLE_APE)) {
14227                 switch (tp->pci_fn) {
14228                 case 0:
14229                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14230                         break;
14231                 case 1:
14232                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14233                         break;
14234                 case 2:
14235                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14236                         break;
14237                 case 3:
14238                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14239                         break;
14240                 }
14241         }
14242
14243         if (tg3_flag(tp, USE_PHYLIB))
14244                 return tg3_phy_init(tp);
14245
14246         /* Reading the PHY ID register can conflict with ASF
14247          * firmware access to the PHY hardware.
14248          */
14249         err = 0;
14250         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
14251                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
14252         } else {
14253                 /* Now read the physical PHY_ID from the chip and verify
14254                  * that it is sane.  If it doesn't look good, we fall back
14255                  * to the PHY_ID found in the eeprom area and, failing
14256                  * that, the hard-coded subsystem device table.
14257                  */
14258                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
14259                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
14260
14261                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
14262                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
14263                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
14264
14265                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
14266         }
14267
14268         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
14269                 tp->phy_id = hw_phy_id;
14270                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
14271                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14272                 else
14273                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
14274         } else {
14275                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
14276                         /* Do nothing, phy ID already set up in
14277                          * tg3_get_eeprom_hw_cfg().
14278                          */
14279                 } else {
14280                         struct subsys_tbl_ent *p;
14281
14282                         /* No eeprom signature?  Try the hardcoded
14283                          * subsys device table.
14284                          */
14285                         p = tg3_lookup_by_subsys(tp);
14286                         if (!p)
14287                                 return -ENODEV;
14288
14289                         tp->phy_id = p->phy_id;
14290                         if (!tp->phy_id ||
14291                             tp->phy_id == TG3_PHY_ID_BCM8002)
14292                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14293                 }
14294         }
14295
14296         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14297             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14298              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
14299              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
14300               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
14301              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
14302               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
14303                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14304
14305         tg3_phy_init_link_config(tp);
14306
14307         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14308             !tg3_flag(tp, ENABLE_APE) &&
14309             !tg3_flag(tp, ENABLE_ASF)) {
14310                 u32 bmsr, dummy;
14311
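                      /* BMSR link status is latched, so read the register
                       * twice to pick up the current link state.
                       */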
14312                 tg3_readphy(tp, MII_BMSR, &bmsr);
14313                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14314                     (bmsr & BMSR_LSTATUS))
14315                         goto skip_phy_reset;
14316
14317                 err = tg3_phy_reset(tp);
14318                 if (err)
14319                         return err;
14320
14321                 tg3_phy_set_wirespeed(tp);
14322
14323                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14324                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14325                                             tp->link_config.flowctrl);
14326
14327                         tg3_writephy(tp, MII_BMCR,
14328                                      BMCR_ANENABLE | BMCR_ANRESTART);
14329                 }
14330         }
14331
14332 skip_phy_reset:
14333         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14334                 err = tg3_init_5401phy_dsp(tp);
14335                 if (err)
14336                         return err;
14337
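                      /* Run the DSP init a second time; on the 5401 a
                       * single pass apparently does not always take.
                       */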
14338                 err = tg3_init_5401phy_dsp(tp);
14339         }
14340
14341         return err;
14342 }
14343
14344 static void tg3_read_vpd(struct tg3 *tp)
14345 {
14346         u8 *vpd_data;
14347         unsigned int block_end, rosize, len;
14348         u32 vpdlen;
14349         int j, i = 0;
14350
14351         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14352         if (!vpd_data)
14353                 goto out_no_vpd;
14354
14355         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14356         if (i < 0)
14357                 goto out_not_found;
14358
14359         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14360         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14361         i += PCI_VPD_LRDT_TAG_SIZE;
14362
14363         if (block_end > vpdlen)
14364                 goto out_not_found;
14365
14366         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14367                                       PCI_VPD_RO_KEYWORD_MFR_ID);
14368         if (j > 0) {
14369                 len = pci_vpd_info_field_size(&vpd_data[j]);
14370
14371                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14372                 if (j + len > block_end || len != 4 ||
14373                     memcmp(&vpd_data[j], "1028", 4))
14374                         goto partno;
14375
14376                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14377                                               PCI_VPD_RO_KEYWORD_VENDOR0);
14378                 if (j < 0)
14379                         goto partno;
14380
14381                 len = pci_vpd_info_field_size(&vpd_data[j]);
14382
14383                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14384                 if (j + len > block_end)
14385                         goto partno;
14386
14387                 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
14388                 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
                               &vpd_data[j]);
14389         }
14390
14391 partno:
14392         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14393                                       PCI_VPD_RO_KEYWORD_PARTNO);
14394         if (i < 0)
14395                 goto out_not_found;
14396
14397         len = pci_vpd_info_field_size(&vpd_data[i]);
14398
14399         i += PCI_VPD_INFO_FLD_HDR_SIZE;
14400         if (len > TG3_BPN_SIZE ||
14401             (len + i) > vpdlen)
14402                 goto out_not_found;
14403
14404         memcpy(tp->board_part_number, &vpd_data[i], len);
14405
14406 out_not_found:
14407         kfree(vpd_data);
14408         if (tp->board_part_number[0])
14409                 return;
14410
14411 out_no_vpd:
14412         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14413                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14414                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14415                         strcpy(tp->board_part_number, "BCM5717");
14416                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14417                         strcpy(tp->board_part_number, "BCM5718");
14418                 else
14419                         goto nomatch;
14420         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14421                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14422                         strcpy(tp->board_part_number, "BCM57780");
14423                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14424                         strcpy(tp->board_part_number, "BCM57760");
14425                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14426                         strcpy(tp->board_part_number, "BCM57790");
14427                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14428                         strcpy(tp->board_part_number, "BCM57788");
14429                 else
14430                         goto nomatch;
14431         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14432                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14433                         strcpy(tp->board_part_number, "BCM57761");
14434                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14435                         strcpy(tp->board_part_number, "BCM57765");
14436                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14437                         strcpy(tp->board_part_number, "BCM57781");
14438                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14439                         strcpy(tp->board_part_number, "BCM57785");
14440                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14441                         strcpy(tp->board_part_number, "BCM57791");
14442                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14443                         strcpy(tp->board_part_number, "BCM57795");
14444                 else
14445                         goto nomatch;
14446         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14447                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14448                         strcpy(tp->board_part_number, "BCM57762");
14449                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14450                         strcpy(tp->board_part_number, "BCM57766");
14451                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14452                         strcpy(tp->board_part_number, "BCM57782");
14453                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14454                         strcpy(tp->board_part_number, "BCM57786");
14455                 else
14456                         goto nomatch;
14457         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14458                 strcpy(tp->board_part_number, "BCM95906");
14459         } else {
14460 nomatch:
14461                 strcpy(tp->board_part_number, "none");
14462         }
14463 }
14464
14465 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14466 {
14467         u32 val;
14468
14469         if (tg3_nvram_read(tp, offset, &val) ||
14470             (val & 0xfc000000) != 0x0c000000 ||
14471             tg3_nvram_read(tp, offset + 4, &val) ||
14472             val != 0)
14473                 return 0;
14474
14475         return 1;
14476 }
14477
14478 static void tg3_read_bc_ver(struct tg3 *tp)
14479 {
14480         u32 val, offset, start, ver_offset;
14481         int i, dst_off;
14482         bool newver = false;
14483
14484         if (tg3_nvram_read(tp, 0xc, &offset) ||
14485             tg3_nvram_read(tp, 0x4, &start))
14486                 return;
14487
14488         offset = tg3_nvram_logical_addr(tp, offset);
14489
14490         if (tg3_nvram_read(tp, offset, &val))
14491                 return;
14492
14493         if ((val & 0xfc000000) == 0x0c000000) {
14494                 if (tg3_nvram_read(tp, offset + 4, &val))
14495                         return;
14496
14497                 if (val == 0)
14498                         newver = true;
14499         }
14500
14501         dst_off = strlen(tp->fw_ver);
14502
14503         if (newver) {
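                      /* Newer bootcode stores a 16-byte version string in
                       * NVRAM; copy it into fw_ver four bytes at a time.
                       */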
14504                 if (TG3_VER_SIZE - dst_off < 16 ||
14505                     tg3_nvram_read(tp, offset + 8, &ver_offset))
14506                         return;
14507
14508                 offset = offset + ver_offset - start;
14509                 for (i = 0; i < 16; i += 4) {
14510                         __be32 v;
14511                         if (tg3_nvram_read_be32(tp, offset + i, &v))
14512                                 return;
14513
14514                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14515                 }
14516         } else {
14517                 u32 major, minor;
14518
14519                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14520                         return;
14521
14522                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14523                         TG3_NVM_BCVER_MAJSFT;
14524                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14525                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14526                          "v%d.%02d", major, minor);
14527         }
14528 }
14529
14530 static void tg3_read_hwsb_ver(struct tg3 *tp)
14531 {
14532         u32 val, major, minor;
14533
14534         /* Use native endian representation */
14535         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14536                 return;
14537
14538         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14539                 TG3_NVM_HWSB_CFG1_MAJSFT;
14540         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14541                 TG3_NVM_HWSB_CFG1_MINSFT;
14542
14543         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
14544 }
14545
14546 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14547 {
14548         u32 offset, major, minor, build;
14549
14550         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14551
14552         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14553                 return;
14554
14555         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14556         case TG3_EEPROM_SB_REVISION_0:
14557                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14558                 break;
14559         case TG3_EEPROM_SB_REVISION_2:
14560                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14561                 break;
14562         case TG3_EEPROM_SB_REVISION_3:
14563                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14564                 break;
14565         case TG3_EEPROM_SB_REVISION_4:
14566                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14567                 break;
14568         case TG3_EEPROM_SB_REVISION_5:
14569                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14570                 break;
14571         case TG3_EEPROM_SB_REVISION_6:
14572                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14573                 break;
14574         default:
14575                 return;
14576         }
14577
14578         if (tg3_nvram_read(tp, offset, &val))
14579                 return;
14580
14581         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14582                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14583         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14584                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14585         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14586
14587         if (minor > 99 || build > 26)
14588                 return;
14589
14590         offset = strlen(tp->fw_ver);
14591         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14592                  " v%d.%02d", major, minor);
14593
14594         if (build > 0) {
14595                 offset = strlen(tp->fw_ver);
14596                 if (offset < TG3_VER_SIZE - 1)
14597                         tp->fw_ver[offset] = 'a' + build - 1;
14598         }
14599 }
14600
14601 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14602 {
14603         u32 val, offset, start;
14604         int i, vlen;
14605
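              /* Walk the NVRAM directory looking for the ASF firmware
               * entry.
               */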
14606         for (offset = TG3_NVM_DIR_START;
14607              offset < TG3_NVM_DIR_END;
14608              offset += TG3_NVM_DIRENT_SIZE) {
14609                 if (tg3_nvram_read(tp, offset, &val))
14610                         return;
14611
14612                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14613                         break;
14614         }
14615
14616         if (offset == TG3_NVM_DIR_END)
14617                 return;
14618
14619         if (!tg3_flag(tp, 5705_PLUS))
14620                 start = 0x08000000;
14621         else if (tg3_nvram_read(tp, offset - 4, &start))
14622                 return;
14623
14624         if (tg3_nvram_read(tp, offset + 4, &offset) ||
14625             !tg3_fw_img_is_valid(tp, offset) ||
14626             tg3_nvram_read(tp, offset + 8, &val))
14627                 return;
14628
14629         offset += val - start;
14630
14631         vlen = strlen(tp->fw_ver);
14632
14633         tp->fw_ver[vlen++] = ',';
14634         tp->fw_ver[vlen++] = ' ';
14635
14636         for (i = 0; i < 4; i++) {
14637                 __be32 v;
14638                 if (tg3_nvram_read_be32(tp, offset, &v))
14639                         return;
14640
14641                 offset += sizeof(v);
14642
14643                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14644                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14645                         break;
14646                 }
14647
14648                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14649                 vlen += sizeof(v);
14650         }
14651 }
14652
14653 static void tg3_probe_ncsi(struct tg3 *tp)
14654 {
14655         u32 apedata;
14656
14657         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14658         if (apedata != APE_SEG_SIG_MAGIC)
14659                 return;
14660
14661         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14662         if (!(apedata & APE_FW_STATUS_READY))
14663                 return;
14664
14665         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14666                 tg3_flag_set(tp, APE_HAS_NCSI);
14667 }
14668
14669 static void tg3_read_dash_ver(struct tg3 *tp)
14670 {
14671         int vlen;
14672         u32 apedata;
14673         char *fwtype;
14674
14675         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14676
14677         if (tg3_flag(tp, APE_HAS_NCSI))
14678                 fwtype = "NCSI";
14679         else
14680                 fwtype = "DASH";
14681
14682         vlen = strlen(tp->fw_ver);
14683
14684         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14685                  fwtype,
14686                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14687                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14688                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14689                  (apedata & APE_FW_VERSION_BLDMSK));
14690 }
14691
14692 static void tg3_read_fw_ver(struct tg3 *tp)
14693 {
14694         u32 val;
14695         bool vpd_vers = false;
14696
14697         if (tp->fw_ver[0] != 0)
14698                 vpd_vers = true;
14699
14700         if (tg3_flag(tp, NO_NVRAM)) {
14701                 strcat(tp->fw_ver, "sb");
14702                 return;
14703         }
14704
14705         if (tg3_nvram_read(tp, 0, &val))
14706                 return;
14707
14708         if (val == TG3_EEPROM_MAGIC)
14709                 tg3_read_bc_ver(tp);
14710         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14711                 tg3_read_sb_ver(tp, val);
14712         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14713                 tg3_read_hwsb_ver(tp);
14714
14715         if (tg3_flag(tp, ENABLE_ASF)) {
14716                 if (tg3_flag(tp, ENABLE_APE)) {
14717                         tg3_probe_ncsi(tp);
14718                         if (!vpd_vers)
14719                                 tg3_read_dash_ver(tp);
14720                 } else if (!vpd_vers) {
14721                         tg3_read_mgmtfw_ver(tp);
14722                 }
14723         }
14724
14725         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14726 }
14727
14728 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14729 {
14730         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14731                 return TG3_RX_RET_MAX_SIZE_5717;
14732         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14733                 return TG3_RX_RET_MAX_SIZE_5700;
14734         else
14735                 return TG3_RX_RET_MAX_SIZE_5705;
14736 }
14737
14738 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14739         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14740         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14741         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14742         { },
14743 };
14744
14745 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
14746 {
14747         struct pci_dev *peer;
14748         unsigned int func, devnr = tp->pdev->devfn & ~7;
14749
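              /* Scan the other functions in this slot for the second port
               * of a dual-port device.
               */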
14750         for (func = 0; func < 8; func++) {
14751                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14752                 if (peer && peer != tp->pdev)
14753                         break;
14754                 pci_dev_put(peer);
14755         }
14756         /* 5704 can be configured in single-port mode; set peer to
14757          * tp->pdev in that case.
14758          */
14759         if (!peer) {
14760                 peer = tp->pdev;
14761                 return peer;
14762         }
14763
14764         /*
14765          * We don't need to keep the refcount elevated; there's no way
14766          * to remove one half of this device without removing the other
14767          */
14768         pci_dev_put(peer);
14769
14770         return peer;
14771 }
14772
14773 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14774 {
14775         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14776         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14777                 u32 reg;
14778
14779                 /* All devices that use the alternate
14780                  * ASIC REV location have a CPMU.
14781                  */
14782                 tg3_flag_set(tp, CPMU_PRESENT);
14783
14784                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14785                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
14786                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14787                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14788                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14789                         reg = TG3PCI_GEN2_PRODID_ASICREV;
14790                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14791                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14792                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14793                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14794                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14795                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14796                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14797                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14798                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14799                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14800                         reg = TG3PCI_GEN15_PRODID_ASICREV;
14801                 else
14802                         reg = TG3PCI_PRODID_ASICREV;
14803
14804                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14805         }
14806
14807         /* Wrong chip ID in 5752 A0. This code can be removed later
14808          * as A0 is not in production.
14809          */
14810         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14811                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14812
14813         if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
14814                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
14815
14816         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14817             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14818             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14819                 tg3_flag_set(tp, 5717_PLUS);
14820
14821         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14822             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14823                 tg3_flag_set(tp, 57765_CLASS);
14824
14825         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14826                 tg3_flag_set(tp, 57765_PLUS);
14827
14828         /* Intentionally exclude ASIC_REV_5906 */
14829         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14830             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14831             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14832             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14833             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14834             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14835             tg3_flag(tp, 57765_PLUS))
14836                 tg3_flag_set(tp, 5755_PLUS);
14837
14838         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14839             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14840                 tg3_flag_set(tp, 5780_CLASS);
14841
14842         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14843             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14844             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14845             tg3_flag(tp, 5755_PLUS) ||
14846             tg3_flag(tp, 5780_CLASS))
14847                 tg3_flag_set(tp, 5750_PLUS);
14848
14849         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14850             tg3_flag(tp, 5750_PLUS))
14851                 tg3_flag_set(tp, 5705_PLUS);
14852 }
14853
14854 static bool tg3_10_100_only_device(struct tg3 *tp,
14855                                    const struct pci_device_id *ent)
14856 {
14857         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
14858
14859         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14860             (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14861             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14862                 return true;
14863
14864         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
14865                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
14866                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
14867                                 return true;
14868                 } else {
14869                         return true;
14870                 }
14871         }
14872
14873         return false;
14874 }
14875
14876 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
14877 {
14878         u32 misc_ctrl_reg;
14879         u32 pci_state_reg, grc_misc_cfg;
14880         u32 val;
14881         u16 pci_cmd;
14882         int err;
14883
14884         /* Force memory write invalidate off.  If we leave it on,
14885          * then on 5700_BX chips we have to enable a workaround.
14886          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14887          * to match the cacheline size.  The Broadcom driver has this
14888          * workaround but turns MWI off all the time and so never uses
14889          * it.  This seems to suggest that the workaround is insufficient.
14890          */
14891         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14892         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14893         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14894
14895         /* Important! -- Make sure register accesses are byteswapped
14896          * correctly.  Also, for those chips that require it, make
14897          * sure that indirect register accesses are enabled before
14898          * the first operation.
14899          */
14900         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14901                               &misc_ctrl_reg);
14902         tp->misc_host_ctrl |= (misc_ctrl_reg &
14903                                MISC_HOST_CTRL_CHIPREV);
14904         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14905                                tp->misc_host_ctrl);
14906
14907         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14908
14909         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14910          * we need to disable memory and use config. cycles
14911          * only to access all registers. The 5702/03 chips
14912          * can mistakenly decode the special cycles from the
14913          * ICH chipsets as memory write cycles, causing corruption
14914          * of register and memory space. Only certain ICH bridges
14915          * will drive special cycles with non-zero data during the
14916          * address phase which can fall within the 5703's address
14917          * range. This is not an ICH bug as the PCI spec allows
14918          * non-zero address during special cycles. However, only
14919          * these ICH bridges are known to drive non-zero addresses
14920          * during special cycles.
14921          *
14922          * Since special cycles do not cross PCI bridges, we only
14923          * enable this workaround if the 5703 is on the secondary
14924          * bus of these ICH bridges.
14925          */
14926         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14927             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14928                 static struct tg3_dev_id {
14929                         u32     vendor;
14930                         u32     device;
14931                         u32     rev;
14932                 } ich_chipsets[] = {
14933                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14934                           PCI_ANY_ID },
14935                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14936                           PCI_ANY_ID },
14937                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14938                           0xa },
14939                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14940                           PCI_ANY_ID },
14941                         { },
14942                 };
14943                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14944                 struct pci_dev *bridge = NULL;
14945
14946                 while (pci_id->vendor != 0) {
14947                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14948                                                 bridge);
14949                         if (!bridge) {
14950                                 pci_id++;
14951                                 continue;
14952                         }
14953                         if (pci_id->rev != PCI_ANY_ID) {
14954                                 if (bridge->revision > pci_id->rev)
14955                                         continue;
14956                         }
14957                         if (bridge->subordinate &&
14958                             (bridge->subordinate->number ==
14959                              tp->pdev->bus->number)) {
14960                                 tg3_flag_set(tp, ICH_WORKAROUND);
14961                                 pci_dev_put(bridge);
14962                                 break;
14963                         }
14964                 }
14965         }
14966
14967         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14968                 static struct tg3_dev_id {
14969                         u32     vendor;
14970                         u32     device;
14971                 } bridge_chipsets[] = {
14972                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14973                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14974                         { },
14975                 };
14976                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14977                 struct pci_dev *bridge = NULL;
14978
14979                 while (pci_id->vendor != 0) {
14980                         bridge = pci_get_device(pci_id->vendor,
14981                                                 pci_id->device,
14982                                                 bridge);
14983                         if (!bridge) {
14984                                 pci_id++;
14985                                 continue;
14986                         }
14987                         if (bridge->subordinate &&
14988                             (bridge->subordinate->number <=
14989                              tp->pdev->bus->number) &&
14990                             (bridge->subordinate->busn_res.end >=
14991                              tp->pdev->bus->number)) {
14992                                 tg3_flag_set(tp, 5701_DMA_BUG);
14993                                 pci_dev_put(bridge);
14994                                 break;
14995                         }
14996                 }
14997         }
14998
14999         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15000          * DMA addresses wider than 40 bits. This bridge may have
15001          * additional 57xx devices behind it in some 4-port NIC designs,
15002          * for example.
15002          * Any tg3 device found behind the bridge will also need the 40-bit
15003          * DMA workaround.
15004          */
15005         if (tg3_flag(tp, 5780_CLASS)) {
15006                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15007                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15008         } else {
15009                 struct pci_dev *bridge = NULL;
15010
15011                 do {
15012                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15013                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15014                                                 bridge);
15015                         if (bridge && bridge->subordinate &&
15016                             (bridge->subordinate->number <=
15017                              tp->pdev->bus->number) &&
15018                             (bridge->subordinate->busn_res.end >=
15019                              tp->pdev->bus->number)) {
15020                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15021                                 pci_dev_put(bridge);
15022                                 break;
15023                         }
15024                 } while (bridge);
15025         }
15026
15027         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15028             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
15029                 tp->pdev_peer = tg3_find_peer(tp);
15030
15031         /* Determine TSO capabilities */
15032         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
15033                 ; /* Do nothing. HW bug. */
15034         else if (tg3_flag(tp, 57765_PLUS))
15035                 tg3_flag_set(tp, HW_TSO_3);
15036         else if (tg3_flag(tp, 5755_PLUS) ||
15037                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15038                 tg3_flag_set(tp, HW_TSO_2);
15039         else if (tg3_flag(tp, 5750_PLUS)) {
15040                 tg3_flag_set(tp, HW_TSO_1);
15041                 tg3_flag_set(tp, TSO_BUG);
15042                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
15043                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
15044                         tg3_flag_clear(tp, TSO_BUG);
15045         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15046                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15047                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
15048                 tg3_flag_set(tp, TSO_BUG);
15049                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
15050                         tp->fw_needed = FIRMWARE_TG3TSO5;
15051                 else
15052                         tp->fw_needed = FIRMWARE_TG3TSO;
15053         }
15054
15055         /* Selectively allow TSO based on operating conditions */
15056         if (tg3_flag(tp, HW_TSO_1) ||
15057             tg3_flag(tp, HW_TSO_2) ||
15058             tg3_flag(tp, HW_TSO_3) ||
15059             tp->fw_needed) {
15060                 /* For firmware TSO, assume ASF is disabled.
15061                  * We'll disable TSO later if we discover ASF
15062                  * is enabled in tg3_get_eeprom_hw_cfg().
15063                  */
15064                 tg3_flag_set(tp, TSO_CAPABLE);
15065         } else {
15066                 tg3_flag_clear(tp, TSO_CAPABLE);
15067                 tg3_flag_clear(tp, TSO_BUG);
15068                 tp->fw_needed = NULL;
15069         }
15070
15071         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
15072                 tp->fw_needed = FIRMWARE_TG3;
15073
15074         tp->irq_max = 1;
15075
15076         if (tg3_flag(tp, 5750_PLUS)) {
15077                 tg3_flag_set(tp, SUPPORT_MSI);
15078                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
15079                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
15080                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
15081                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
15082                      tp->pdev_peer == tp->pdev))
15083                         tg3_flag_clear(tp, SUPPORT_MSI);
15084
15085                 if (tg3_flag(tp, 5755_PLUS) ||
15086                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15087                         tg3_flag_set(tp, 1SHOT_MSI);
15088                 }
15089
15090                 if (tg3_flag(tp, 57765_PLUS)) {
15091                         tg3_flag_set(tp, SUPPORT_MSIX);
15092                         tp->irq_max = TG3_IRQ_MAX_VECS;
15093                 }
15094         }
15095
15096         tp->txq_max = 1;
15097         tp->rxq_max = 1;
15098         if (tp->irq_max > 1) {
15099                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15100                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15101
15102                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15103                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15104                         tp->txq_max = tp->irq_max - 1;
15105         }
15106
15107         if (tg3_flag(tp, 5755_PLUS) ||
15108             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15109                 tg3_flag_set(tp, SHORT_DMA_BUG);
15110
15111         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
15112                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15113
15114         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15115             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15116             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15117                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15118
15119         if (tg3_flag(tp, 57765_PLUS) &&
15120             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
15121                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15122
15123         if (!tg3_flag(tp, 5705_PLUS) ||
15124             tg3_flag(tp, 5780_CLASS) ||
15125             tg3_flag(tp, USE_JUMBO_BDFLAG))
15126                 tg3_flag_set(tp, JUMBO_CAPABLE);
15127
15128         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15129                               &pci_state_reg);
15130
15131         if (pci_is_pcie(tp->pdev)) {
15132                 u16 lnkctl;
15133
15134                 tg3_flag_set(tp, PCI_EXPRESS);
15135
15136                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15137                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15138                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
15139                             ASIC_REV_5906) {
15140                                 tg3_flag_clear(tp, HW_TSO_2);
15141                                 tg3_flag_clear(tp, TSO_CAPABLE);
15142                         }
15143                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15144                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15145                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
15146                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
15147                                 tg3_flag_set(tp, CLKREQ_BUG);
15148                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
15149                         tg3_flag_set(tp, L1PLLPD_EN);
15150                 }
15151         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
15152                 /* BCM5785 devices are effectively PCIe devices, and should
15153                  * follow PCIe codepaths, but do not have a PCIe capabilities
15154                  * section.
15155                  */
15156                 tg3_flag_set(tp, PCI_EXPRESS);
15157         } else if (!tg3_flag(tp, 5705_PLUS) ||
15158                    tg3_flag(tp, 5780_CLASS)) {
15159                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15160                 if (!tp->pcix_cap) {
15161                         dev_err(&tp->pdev->dev,
15162                                 "Cannot find PCI-X capability, aborting\n");
15163                         return -EIO;
15164                 }
15165
15166                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15167                         tg3_flag_set(tp, PCIX_MODE);
15168         }
15169
15170         /* If we have an AMD 762 or VIA K8T800 chipset, write
15171          * reordering to the mailbox registers done by the host
15172          * controller can cause major trouble.  We read back from
15173          * every mailbox register write to force the writes to be
15174          * posted to the chip in order.
15175          */
15176         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15177             !tg3_flag(tp, PCI_EXPRESS))
15178                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15179
15180         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15181                              &tp->pci_cacheline_sz);
15182         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15183                              &tp->pci_lat_timer);
15184         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
15185             tp->pci_lat_timer < 64) {
15186                 tp->pci_lat_timer = 64;
15187                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15188                                       tp->pci_lat_timer);
15189         }
15190
15191         /* Important! -- It is critical that the PCI-X hw workaround
15192          * situation is decided before the first MMIO register access.
15193          */
15194         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
15195                 /* 5700 BX chips need to have their TX producer index
15196                  * mailboxes written twice to work around a bug.
15197                  */
15198                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
15199
15200                 /* If we are in PCI-X mode, enable register write workaround.
15201                  *
15202                  * The workaround is to use indirect register accesses
15203                  * for all chip writes not to mailbox registers.
15204                  */
15205                 if (tg3_flag(tp, PCIX_MODE)) {
15206                         u32 pm_reg;
15207
15208                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15209
15210                         /* The chip can have its power management PCI config
15211                          * space registers clobbered due to this bug.
15212                          * So explicitly force the chip into D0 here.
15213                          */
15214                         pci_read_config_dword(tp->pdev,
15215                                               tp->pm_cap + PCI_PM_CTRL,
15216                                               &pm_reg);
15217                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
15218                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
15219                         pci_write_config_dword(tp->pdev,
15220                                                tp->pm_cap + PCI_PM_CTRL,
15221                                                pm_reg);
15222
15223                         /* Also, force SERR#/PERR# in PCI command. */
15224                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15225                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
15226                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15227                 }
15228         }
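        /* For reference, the indirect write path selected by
         * PCIX_TARGET_HWBUG above is tg3_write_indirect_reg32(), defined
         * earlier in this file; it routes register writes through PCI
         * config space instead of MMIO, roughly:
         *
         *      spin_lock_irqsave(&tp->indirect_lock, flags);
         *      pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
         *      pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
         *      spin_unlock_irqrestore(&tp->indirect_lock, flags);
         */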
15229
15230         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
15231                 tg3_flag_set(tp, PCI_HIGH_SPEED);
15232         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
15233                 tg3_flag_set(tp, PCI_32BIT);
15234
15235         /* Chip-specific fixup from Broadcom driver */
15236         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
15237             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
15238                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
15239                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
15240         }
15241
15242         /* Default fast path register access methods */
15243         tp->read32 = tg3_read32;
15244         tp->write32 = tg3_write32;
15245         tp->read32_mbox = tg3_read32;
15246         tp->write32_mbox = tg3_write32;
15247         tp->write32_tx_mbox = tg3_write32;
15248         tp->write32_rx_mbox = tg3_write32;
15249
15250         /* Various workaround register access methods */
15251         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
15252                 tp->write32 = tg3_write_indirect_reg32;
15253         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
15254                  (tg3_flag(tp, PCI_EXPRESS) &&
15255                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
15256                 /*
15257                  * Back-to-back register writes can cause problems on these
15258                  * chips; the workaround is to read back all reg writes
15259                  * except those to mailbox regs.
15260                  *
15261                  * See tg3_write_flush_reg32().
15262                  */
15263                 tp->write32 = tg3_write_flush_reg32;
15264         }
15265
15266         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
15267                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
15268                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
15269                         tp->write32_rx_mbox = tg3_write_flush_reg32;
15270         }
15271
15272         if (tg3_flag(tp, ICH_WORKAROUND)) {
15273                 tp->read32 = tg3_read_indirect_reg32;
15274                 tp->write32 = tg3_write_indirect_reg32;
15275                 tp->read32_mbox = tg3_read_indirect_mbox;
15276                 tp->write32_mbox = tg3_write_indirect_mbox;
15277                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
15278                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15279
15280                 iounmap(tp->regs);
15281                 tp->regs = NULL;
15282
15283                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15284                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15285                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15286         }
15287         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15288                 tp->read32_mbox = tg3_read32_mbox_5906;
15289                 tp->write32_mbox = tg3_write32_mbox_5906;
15290                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15291                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15292         }
15293
15294         if (tp->write32 == tg3_write_indirect_reg32 ||
15295             (tg3_flag(tp, PCIX_MODE) &&
15296              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15297               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
15298                 tg3_flag_set(tp, SRAM_USE_CONFIG);
15299
15300         /* The memory arbiter has to be enabled in order for SRAM accesses
15301          * to succeed.  Normally on powerup the tg3 chip firmware will make
15302          * sure it is enabled, but other entities such as system netboot
15303          * code might disable it.
15304          */
15305         val = tr32(MEMARB_MODE);
15306         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15307
15308         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15309         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15310             tg3_flag(tp, 5780_CLASS)) {
15311                 if (tg3_flag(tp, PCIX_MODE)) {
15312                         pci_read_config_dword(tp->pdev,
15313                                               tp->pcix_cap + PCI_X_STATUS,
15314                                               &val);
15315                         tp->pci_fn = val & 0x7;
15316                 }
15317         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
15318                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15319                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
15320                     NIC_SRAM_CPMUSTAT_SIG) {
15321                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
15322                         tp->pci_fn = tp->pci_fn ? 1 : 0;
15323                 }
15324         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15325                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
15326                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15327                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
15328                     NIC_SRAM_CPMUSTAT_SIG) {
15329                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15330                                      TG3_CPMU_STATUS_FSHFT_5719;
15331                 }
15332         }
15333
15334         /* Get eeprom hw config before calling tg3_set_power_state().
15335          * In particular, the TG3_FLAG_IS_NIC flag must be
15336          * determined before calling tg3_set_power_state() so that
15337          * we know whether or not to switch out of Vaux power.
15338          * When the flag is set, GPIO1 is used for eeprom write
15339          * protect, which also implies a LOM design where GPIOs
15340          * are not used to switch power.
15341          */
15342         tg3_get_eeprom_hw_cfg(tp);
15343
15344         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15345                 tg3_flag_clear(tp, TSO_CAPABLE);
15346                 tg3_flag_clear(tp, TSO_BUG);
15347                 tp->fw_needed = NULL;
15348         }
15349
15350         if (tg3_flag(tp, ENABLE_APE)) {
15351                 /* Allow reads and writes to the
15352                  * APE register and memory space.
15353                  */
15354                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15355                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15356                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15357                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15358                                        pci_state_reg);
15359
15360                 tg3_ape_lock_init(tp);
15361         }
15362
15363         /* Set up tp->grc_local_ctrl before calling
15364          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15365          * will bring 5700's external PHY out of reset.
15366          * It is also used as eeprom write protect on LOMs.
15367          */
15368         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15369         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15370             tg3_flag(tp, EEPROM_WRITE_PROT))
15371                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15372                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15373         /* Unused GPIO3 must be driven as output on 5752 because there
15374          * are no pull-up resistors on unused GPIO pins.
15375          */
15376         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15377                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15378
15379         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15380             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
15381             tg3_flag(tp, 57765_CLASS))
15382                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15383
15384         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15385             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15386                 /* Turn off the debug UART. */
15387                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15388                 if (tg3_flag(tp, IS_NIC))
15389                         /* Keep VMain power. */
15390                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15391                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15392         }
15393
15394         /* Switch out of Vaux if it is a NIC */
15395         tg3_pwrsrc_switch_to_vmain(tp);
15396
15397         /* Derive the initial jumbo mode from the MTU assigned in
15398          * ether_setup() via the alloc_etherdev() call.
15399          */
15400         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15401                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15402
15403         /* Determine the Wake-on-LAN speed to use. */
15404         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15405             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15406             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15407             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
15408                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15409         } else {
15410                 tg3_flag_set(tp, WOL_SPEED_100MB);
15411         }
15412
15413         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15414                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15415
15416         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
15417         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15418             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15419              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15420              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15421             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15422             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15423                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15424
15425         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15426             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15427                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15428         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15429                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15430
15431         if (tg3_flag(tp, 5705_PLUS) &&
15432             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15433             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15434             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15435             !tg3_flag(tp, 57765_PLUS)) {
15436                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15437                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15438                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15439                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15440                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15441                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15442                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15443                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15444                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15445                 } else
15446                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15447         }
15448
15449         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15450             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15451                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15452                 if (tp->phy_otp == 0)
15453                         tp->phy_otp = TG3_OTP_DEFAULT;
15454         }
15455
15456         if (tg3_flag(tp, CPMU_PRESENT))
15457                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15458         else
15459                 tp->mi_mode = MAC_MI_MODE_BASE;
15460
15461         tp->coalesce_mode = 0;
15462         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15463             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15464                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15465
15466         /* Set these bits to enable the statistics workaround. */
15467         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15468             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15469             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15470                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15471                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15472         }
15473
15474         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15475             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15476                 tg3_flag_set(tp, USE_PHYLIB);
15477
15478         err = tg3_mdio_init(tp);
15479         if (err)
15480                 return err;
15481
15482         /* Initialize data/descriptor byte/word swapping. */
15483         val = tr32(GRC_MODE);
15484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15485                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15486                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15487                         GRC_MODE_B2HRX_ENABLE |
15488                         GRC_MODE_HTX2B_ENABLE |
15489                         GRC_MODE_HOST_STACKUP);
15490         else
15491                 val &= GRC_MODE_HOST_STACKUP;
15492
15493         tw32(GRC_MODE, val | tp->grc_mode);
15494
15495         tg3_switch_clocks(tp);
15496
15497         /* Clear this out for sanity. */
15498         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15499
15500         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15501                               &pci_state_reg);
15502         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15503             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15504                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15505
15506                 if (chiprevid == CHIPREV_ID_5701_A0 ||
15507                     chiprevid == CHIPREV_ID_5701_B0 ||
15508                     chiprevid == CHIPREV_ID_5701_B2 ||
15509                     chiprevid == CHIPREV_ID_5701_B5) {
15510                         void __iomem *sram_base;
15511
15512                         /* Write some dummy words into the SRAM status block
15513                          * area and see if they read back correctly.  If the
15514                          * readback is bad, force-enable the PCI-X workaround.
15515                          */
15516                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15517
15518                         writel(0x00000000, sram_base);
15519                         writel(0x00000000, sram_base + 4);
15520                         writel(0xffffffff, sram_base + 4);
15521                         if (readl(sram_base) != 0x00000000)
15522                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15523                 }
15524         }
15525
15526         udelay(50);
15527         tg3_nvram_init(tp);
15528
15529         grc_misc_cfg = tr32(GRC_MISC_CFG);
15530         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15531
15532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15533             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15534              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15535                 tg3_flag_set(tp, IS_5788);
15536
15537         if (!tg3_flag(tp, IS_5788) &&
15538             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15539                 tg3_flag_set(tp, TAGGED_STATUS);
15540         if (tg3_flag(tp, TAGGED_STATUS)) {
15541                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15542                                       HOSTCC_MODE_CLRTICK_TXBD);
15543
15544                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15545                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15546                                        tp->misc_host_ctrl);
15547         }
15548
15549         /* Preserve the APE MAC_MODE bits */
15550         if (tg3_flag(tp, ENABLE_APE))
15551                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15552         else
15553                 tp->mac_mode = 0;
15554
15555         if (tg3_10_100_only_device(tp, ent))
15556                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15557
15558         err = tg3_phy_probe(tp);
15559         if (err) {
15560                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15561                 /* ... but do not return immediately ... */
15562                 tg3_mdio_fini(tp);
15563         }
15564
15565         tg3_read_vpd(tp);
15566         tg3_read_fw_ver(tp);
15567
15568         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15569                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15570         } else {
15571                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15572                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15573                 else
15574                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15575         }
15576
15577         /* 5700 {AX,BX} chips have a broken status block link
15578          * change bit implementation, so we must use the
15579          * status register in those cases.
15580          */
15581         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15582                 tg3_flag_set(tp, USE_LINKCHG_REG);
15583         else
15584                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15585
15586         /* The led_ctrl is set during tg3_phy_probe; here we might
15587          * have to force the link status polling mechanism based
15588          * upon subsystem IDs.
15589          */
15590         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15591             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15592             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15593                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15594                 tg3_flag_set(tp, USE_LINKCHG_REG);
15595         }
15596
15597         /* For all SERDES we poll the MAC status register. */
15598         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15599                 tg3_flag_set(tp, POLL_SERDES);
15600         else
15601                 tg3_flag_clear(tp, POLL_SERDES);
15602
15603         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15604         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15605         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15606             tg3_flag(tp, PCIX_MODE)) {
15607                 tp->rx_offset = NET_SKB_PAD;
15608 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15609                 tp->rx_copy_thresh = ~(u16)0;
15610 #endif
15611         }
15612
15613         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15614         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15615         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15616
15617         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15618
15619         /* Increment the rx prod index on the rx std ring by at most
15620          * 8 for these chips to work around hw errata.
15621          */
15622         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15623             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15624             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15625                 tp->rx_std_max_post = 8;
15626
15627         if (tg3_flag(tp, ASPM_WORKAROUND))
15628                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15629                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15630
15631         return err;
15632 }
15633
15634 #ifdef CONFIG_SPARC
15635 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15636 {
15637         struct net_device *dev = tp->dev;
15638         struct pci_dev *pdev = tp->pdev;
15639         struct device_node *dp = pci_device_to_OF_node(pdev);
15640         const unsigned char *addr;
15641         int len;
15642
15643         addr = of_get_property(dp, "local-mac-address", &len);
15644         if (addr && len == 6) {
15645                 memcpy(dev->dev_addr, addr, 6);
15646                 memcpy(dev->perm_addr, dev->dev_addr, 6);
15647                 return 0;
15648         }
15649         return -ENODEV;
15650 }
15651
15652 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15653 {
15654         struct net_device *dev = tp->dev;
15655
15656         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15657         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15658         return 0;
15659 }
15660 #endif
15661
15662 static int tg3_get_device_address(struct tg3 *tp)
15663 {
15664         struct net_device *dev = tp->dev;
15665         u32 hi, lo, mac_offset;
15666         int addr_ok = 0;
15667
15668 #ifdef CONFIG_SPARC
15669         if (!tg3_get_macaddr_sparc(tp))
15670                 return 0;
15671 #endif
15672
15673         mac_offset = 0x7c;
15674         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15675             tg3_flag(tp, 5780_CLASS)) {
15676                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15677                         mac_offset = 0xcc;
15678                 if (tg3_nvram_lock(tp))
15679                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15680                 else
15681                         tg3_nvram_unlock(tp);
15682         } else if (tg3_flag(tp, 5717_PLUS)) {
15683                 if (tp->pci_fn & 1)
15684                         mac_offset = 0xcc;
15685                 if (tp->pci_fn > 1)
15686                         mac_offset += 0x18c;
15687         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15688                 mac_offset = 0x10;
15689
15690         /* First try to get it from the MAC address mailbox. */
15691         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15692         if ((hi >> 16) == 0x484b) {
15693                 dev->dev_addr[0] = (hi >>  8) & 0xff;
15694                 dev->dev_addr[1] = (hi >>  0) & 0xff;
15695
15696                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15697                 dev->dev_addr[2] = (lo >> 24) & 0xff;
15698                 dev->dev_addr[3] = (lo >> 16) & 0xff;
15699                 dev->dev_addr[4] = (lo >>  8) & 0xff;
15700                 dev->dev_addr[5] = (lo >>  0) & 0xff;
15701
15702                 /* Some old bootcode may report a 0 MAC address in SRAM */
15703                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15704         }
15705         if (!addr_ok) {
15706                 /* Next, try NVRAM. */
15707                 if (!tg3_flag(tp, NO_NVRAM) &&
15708                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15709                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15710                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15711                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15712                 }
15713                 /* Finally just fetch it out of the MAC control regs. */
15714                 else {
15715                         hi = tr32(MAC_ADDR_0_HIGH);
15716                         lo = tr32(MAC_ADDR_0_LOW);
15717
15718                         dev->dev_addr[5] = lo & 0xff;
15719                         dev->dev_addr[4] = (lo >> 8) & 0xff;
15720                         dev->dev_addr[3] = (lo >> 16) & 0xff;
15721                         dev->dev_addr[2] = (lo >> 24) & 0xff;
15722                         dev->dev_addr[1] = hi & 0xff;
15723                         dev->dev_addr[0] = (hi >> 8) & 0xff;
15724                 }
15725         }
15726
15727         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15728 #ifdef CONFIG_SPARC
15729                 if (!tg3_get_default_macaddr_sparc(tp))
15730                         return 0;
15731 #endif
15732                 return -EINVAL;
15733         }
15734         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15735         return 0;
15736 }
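/* Illustrative sketch, not used by the driver: how the six MAC address
 * bytes unpack from the hi/lo SRAM mailbox words consumed above.  The
 * 0x484b signature checked in tg3_get_device_address() is ASCII "HK".
 * The helper name is hypothetical.
 */
static inline void tg3_sketch_unpack_macaddr(u8 *addr, u32 hi, u32 lo)
{
        addr[0] = (hi >>  8) & 0xff;    /* bytes 0 and 1 sit in the  */
        addr[1] = (hi >>  0) & 0xff;    /* low half of the high word */
        addr[2] = (lo >> 24) & 0xff;    /* the remaining four bytes  */
        addr[3] = (lo >> 16) & 0xff;    /* fill the low word, most-  */
        addr[4] = (lo >>  8) & 0xff;    /* significant byte first    */
        addr[5] = (lo >>  0) & 0xff;
}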
15737
15738 #define BOUNDARY_SINGLE_CACHELINE       1
15739 #define BOUNDARY_MULTI_CACHELINE        2
15740
15741 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15742 {
15743         int cacheline_size;
15744         u8 byte;
15745         int goal;
15746
15747         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15748         if (byte == 0)
15749                 cacheline_size = 1024;
15750         else
15751                 cacheline_size = (int) byte * 4;
15752
15753         /* On 5703 and later chips, the boundary bits have no
15754          * effect.
15755          */
15756         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15757             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15758             !tg3_flag(tp, PCI_EXPRESS))
15759                 goto out;
15760
15761 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15762         goal = BOUNDARY_MULTI_CACHELINE;
15763 #else
15764 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15765         goal = BOUNDARY_SINGLE_CACHELINE;
15766 #else
15767         goal = 0;
15768 #endif
15769 #endif
15770
15771         if (tg3_flag(tp, 57765_PLUS)) {
15772                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15773                 goto out;
15774         }
15775
15776         if (!goal)
15777                 goto out;
15778
15779         /* PCI controllers on most RISC systems tend to disconnect
15780          * when a device tries to burst across a cache-line boundary.
15781          * Therefore, letting tg3 do so just wastes PCI bandwidth.
15782          *
15783          * Unfortunately, for PCI-E there are only limited
15784          * write-side controls for this, and thus for reads
15785          * we will still get the disconnects.  We'll also waste
15786          * these PCI cycles for both read and write for chips
15787          * other than 5700 and 5701 which do not implement the
15788          * boundary bits.
15789          */
15790         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15791                 switch (cacheline_size) {
15792                 case 16:
15793                 case 32:
15794                 case 64:
15795                 case 128:
15796                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15797                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15798                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15799                         } else {
15800                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15801                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15802                         }
15803                         break;
15804
15805                 case 256:
15806                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15807                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15808                         break;
15809
15810                 default:
15811                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15812                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15813                         break;
15814                 }
15815         } else if (tg3_flag(tp, PCI_EXPRESS)) {
15816                 switch (cacheline_size) {
15817                 case 16:
15818                 case 32:
15819                 case 64:
15820                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15821                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15822                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15823                                 break;
15824                         }
15825                         /* fallthrough */
15826                 case 128:
15827                 default:
15828                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15829                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15830                         break;
15831                 }
15832         } else {
15833                 switch (cacheline_size) {
15834                 case 16:
15835                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15836                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15837                                         DMA_RWCTRL_WRITE_BNDRY_16);
15838                                 break;
15839                         }
15840                         /* fallthrough */
15841                 case 32:
15842                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15843                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15844                                         DMA_RWCTRL_WRITE_BNDRY_32);
15845                                 break;
15846                         }
15847                         /* fallthrough */
15848                 case 64:
15849                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15850                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15851                                         DMA_RWCTRL_WRITE_BNDRY_64);
15852                                 break;
15853                         }
15854                         /* fallthrough */
15855                 case 128:
15856                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15857                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15858                                         DMA_RWCTRL_WRITE_BNDRY_128);
15859                                 break;
15860                         }
15861                         /* fallthrough */
15862                 case 256:
15863                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15864                                 DMA_RWCTRL_WRITE_BNDRY_256);
15865                         break;
15866                 case 512:
15867                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15868                                 DMA_RWCTRL_WRITE_BNDRY_512);
15869                         break;
15870                 case 1024:
15871                 default:
15872                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15873                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15874                         break;
15875                 }
15876         }
15877
15878 out:
15879         return val;
15880 }
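/* Note on tg3_calc_dma_bndry() above: the PCI_CACHE_LINE_SIZE config
 * byte is expressed in 32-bit words, hence the multiply by 4; a value
 * of 0 typically means nothing ever programmed it, and the code then
 * assumes the most conservative 1024-byte figure.  A standalone sketch:
 *
 *      u8 byte;
 *      pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
 *      cacheline_bytes = byte ? byte * 4 : 1024;
 */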
15881
15882 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
15883                            int size, int to_device)
15884 {
15885         struct tg3_internal_buffer_desc test_desc;
15886         u32 sram_dma_descs;
15887         int i, ret;
15888
15889         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15890
15891         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15892         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15893         tw32(RDMAC_STATUS, 0);
15894         tw32(WDMAC_STATUS, 0);
15895
15896         tw32(BUFMGR_MODE, 0);
15897         tw32(FTQ_RESET, 0);
15898
15899         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15900         test_desc.addr_lo = buf_dma & 0xffffffff;
15901         test_desc.nic_mbuf = 0x00002100;
15902         test_desc.len = size;
15903
15904         /*
15905          * HP ZX1 systems were seeing test failures for 5701 cards running
15906          * at 33MHz the *second* time the tg3 driver was loaded after an
15907          * initial scan.
15908          *
15909          * Broadcom tells me:
15910          *   ...the DMA engine is connected to the GRC block and a DMA
15911          *   reset may affect the GRC block in some unpredictable way...
15912          *   The behavior of resets to individual blocks has not been tested.
15913          *
15914          * Broadcom noted the GRC reset will also reset all sub-components.
15915          */
15916         if (to_device) {
15917                 test_desc.cqid_sqid = (13 << 8) | 2;
15918
15919                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15920                 udelay(40);
15921         } else {
15922                 test_desc.cqid_sqid = (16 << 8) | 7;
15923
15924                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15925                 udelay(40);
15926         }
15927         test_desc.flags = 0x00000005;
15928
15929         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15930                 u32 val;
15931
15932                 val = *(((u32 *)&test_desc) + i);
15933                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15934                                        sram_dma_descs + (i * sizeof(u32)));
15935                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15936         }
15937         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15938
15939         if (to_device)
15940                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15941         else
15942                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15943
15944         ret = -ENODEV;
15945         for (i = 0; i < 40; i++) {
15946                 u32 val;
15947
15948                 if (to_device)
15949                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15950                 else
15951                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15952                 if ((val & 0xffff) == sram_dma_descs) {
15953                         ret = 0;
15954                         break;
15955                 }
15956
15957                 udelay(100);
15958         }
15959
15960         return ret;
15961 }
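/* Usage sketch: tg3_test_dma() below drives this helper twice against a
 * single coherent buffer, first with to_device == 1 to DMA a test
 * pattern into NIC SRAM at offset 0x2100, then with to_device == 0 to
 * DMA it back out, after which the caller verifies the buffer word by
 * word.
 */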
15962
15963 #define TEST_BUFFER_SIZE        0x2000
15964
15965 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15966         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15967         { },
15968 };
15969
15970 static int tg3_test_dma(struct tg3 *tp)
15971 {
15972         dma_addr_t buf_dma;
15973         u32 *buf, saved_dma_rwctrl;
15974         int ret = 0;
15975
15976         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15977                                  &buf_dma, GFP_KERNEL);
15978         if (!buf) {
15979                 ret = -ENOMEM;
15980                 goto out_nofree;
15981         }
15982
15983         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15984                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15985
15986         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15987
15988         if (tg3_flag(tp, 57765_PLUS))
15989                 goto out;
15990
15991         if (tg3_flag(tp, PCI_EXPRESS)) {
15992                 /* DMA read watermark not used on PCIE */
15993                 tp->dma_rwctrl |= 0x00180000;
15994         } else if (!tg3_flag(tp, PCIX_MODE)) {
15995                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15996                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15997                         tp->dma_rwctrl |= 0x003f0000;
15998                 else
15999                         tp->dma_rwctrl |= 0x003f000f;
16000         } else {
16001                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16002                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
16003                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16004                         u32 read_water = 0x7;
16005
16006                         /* If the 5704 is behind the EPB bridge, we can
16007                          * do the less restrictive ONE_DMA workaround for
16008                          * better performance.
16009                          */
16010                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16011                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16012                                 tp->dma_rwctrl |= 0x8000;
16013                         else if (ccval == 0x6 || ccval == 0x7)
16014                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16015
16016                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
16017                                 read_water = 4;
16018                         /* Set bit 23 to enable PCIX hw bug fix */
16019                         tp->dma_rwctrl |=
16020                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16021                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16022                                 (1 << 23);
16023                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
16024                         /* 5780 always in PCIX mode */
16025                         tp->dma_rwctrl |= 0x00144000;
16026                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
16027                         /* 5714 always in PCIX mode */
16028                         tp->dma_rwctrl |= 0x00148000;
16029                 } else {
16030                         tp->dma_rwctrl |= 0x001b000f;
16031                 }
16032         }
16033
16034         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
16035             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
16036                 tp->dma_rwctrl &= 0xfffffff0;
16037
16038         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
16039             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
16040                 /* Remove this if it causes problems for some boards. */
16041                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16042
16043                 /* On 5700/5701 chips, we need to set this bit.
16044                  * Otherwise the chip will issue cacheline transactions
16045                  * to streamable DMA memory without all the byte
16046                  * enables turned on.  This is an error on several
16047                  * RISC PCI controllers, in particular sparc64.
16048                  *
16049                  * On 5703/5704 chips, this bit has been reassigned
16050                  * a different meaning.  In particular, it is used
16051                  * on those chips to enable a PCI-X workaround.
16052                  */
16053                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16054         }
16055
16056         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16057
16058 #if 0
16059         /* Unneeded, already done by tg3_get_invariants.  */
16060         tg3_switch_clocks(tp);
16061 #endif
16062
16063         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
16064             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
16065                 goto out;
16066
16067         /* It is best to perform the DMA test with the maximum write
16068          * burst size to expose the 5700/5701 write DMA bug.
16069          */
16070         saved_dma_rwctrl = tp->dma_rwctrl;
16071         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16072         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16073
16074         while (1) {
16075                 u32 *p = buf, i;
16076
16077                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16078                         p[i] = i;
16079
16080                 /* Send the buffer to the chip. */
16081                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
16082                 if (ret) {
16083                         dev_err(&tp->pdev->dev,
16084                                 "%s: Buffer write failed. err = %d\n",
16085                                 __func__, ret);
16086                         break;
16087                 }
16088
16089 #if 0
16090                 /* Validate that the data reached card RAM correctly. */
16091                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16092                         u32 val;
16093                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
16094                         if (le32_to_cpu(val) != p[i]) {
16095                                 dev_err(&tp->pdev->dev,
16096                                         "%s: Buffer corrupted on device! "
16097                                         "(%u != %u)\n", __func__, le32_to_cpu(val), i);
16098                                 /* ret = -ENODEV here? */
16099                         }
16100                         p[i] = 0;
16101                 }
16102 #endif
16103                 /* Now read it back. */
16104                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
16105                 if (ret) {
16106                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16107                                 "err = %d\n", __func__, ret);
16108                         break;
16109                 }
16110
16111                 /* Verify it. */
16112                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16113                         if (p[i] == i)
16114                                 continue;
16115
16116                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16117                             DMA_RWCTRL_WRITE_BNDRY_16) {
16118                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16119                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16120                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16121                                 break;
16122                         } else {
16123                                 dev_err(&tp->pdev->dev,
16124                                         "%s: Buffer corrupted on read back! "
16125                                         "(%u != %u)\n", __func__, p[i], i);
16126                                 ret = -ENODEV;
16127                                 goto out;
16128                         }
16129                 }
16130
16131                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16132                         /* Success. */
16133                         ret = 0;
16134                         break;
16135                 }
16136         }
16137         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16138             DMA_RWCTRL_WRITE_BNDRY_16) {
16139                 /* DMA test passed without adjusting the DMA boundary;
16140                  * now look for chipsets that are known to expose the
16141                  * DMA bug without failing the test.
16142                  */
16143                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16144                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16145                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16146                 } else {
16147                         /* Safe to use the calculated DMA boundary. */
16148                         tp->dma_rwctrl = saved_dma_rwctrl;
16149                 }
16150
16151                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16152         }
16153
16154 out:
16155         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16156 out_nofree:
16157         return ret;
16158 }
16159
16160 static void tg3_init_bufmgr_config(struct tg3 *tp)
16161 {
16162         if (tg3_flag(tp, 57765_PLUS)) {
16163                 tp->bufmgr_config.mbuf_read_dma_low_water =
16164                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16165                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16166                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16167                 tp->bufmgr_config.mbuf_high_water =
16168                         DEFAULT_MB_HIGH_WATER_57765;
16169
16170                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16171                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16172                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16173                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
16174                 tp->bufmgr_config.mbuf_high_water_jumbo =
16175                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
16176         } else if (tg3_flag(tp, 5705_PLUS)) {
16177                 tp->bufmgr_config.mbuf_read_dma_low_water =
16178                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16179                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16180                         DEFAULT_MB_MACRX_LOW_WATER_5705;
16181                 tp->bufmgr_config.mbuf_high_water =
16182                         DEFAULT_MB_HIGH_WATER_5705;
16183                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
16184                         tp->bufmgr_config.mbuf_mac_rx_low_water =
16185                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
16186                         tp->bufmgr_config.mbuf_high_water =
16187                                 DEFAULT_MB_HIGH_WATER_5906;
16188                 }
16189
16190                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16191                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
16192                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16193                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
16194                 tp->bufmgr_config.mbuf_high_water_jumbo =
16195                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
16196         } else {
16197                 tp->bufmgr_config.mbuf_read_dma_low_water =
16198                         DEFAULT_MB_RDMA_LOW_WATER;
16199                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16200                         DEFAULT_MB_MACRX_LOW_WATER;
16201                 tp->bufmgr_config.mbuf_high_water =
16202                         DEFAULT_MB_HIGH_WATER;
16203
16204                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
16205                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
16206                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
16207                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
16208                 tp->bufmgr_config.mbuf_high_water_jumbo =
16209                         DEFAULT_MB_HIGH_WATER_JUMBO;
16210         }
16211
16212         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
16213         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
16214 }
16215
16216 static char *tg3_phy_string(struct tg3 *tp)
16217 {
16218         switch (tp->phy_id & TG3_PHY_ID_MASK) {
16219         case TG3_PHY_ID_BCM5400:        return "5400";
16220         case TG3_PHY_ID_BCM5401:        return "5401";
16221         case TG3_PHY_ID_BCM5411:        return "5411";
16222         case TG3_PHY_ID_BCM5701:        return "5701";
16223         case TG3_PHY_ID_BCM5703:        return "5703";
16224         case TG3_PHY_ID_BCM5704:        return "5704";
16225         case TG3_PHY_ID_BCM5705:        return "5705";
16226         case TG3_PHY_ID_BCM5750:        return "5750";
16227         case TG3_PHY_ID_BCM5752:        return "5752";
16228         case TG3_PHY_ID_BCM5714:        return "5714";
16229         case TG3_PHY_ID_BCM5780:        return "5780";
16230         case TG3_PHY_ID_BCM5755:        return "5755";
16231         case TG3_PHY_ID_BCM5787:        return "5787";
16232         case TG3_PHY_ID_BCM5784:        return "5784";
16233         case TG3_PHY_ID_BCM5756:        return "5722/5756";
16234         case TG3_PHY_ID_BCM5906:        return "5906";
16235         case TG3_PHY_ID_BCM5761:        return "5761";
16236         case TG3_PHY_ID_BCM5718C:       return "5718C";
16237         case TG3_PHY_ID_BCM5718S:       return "5718S";
16238         case TG3_PHY_ID_BCM57765:       return "57765";
16239         case TG3_PHY_ID_BCM5719C:       return "5719C";
16240         case TG3_PHY_ID_BCM5720C:       return "5720C";
16241         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
16242         case 0:                 return "serdes";
16243         default:                return "unknown";
16244         }
16245 }
16246
16247 static char *tg3_bus_string(struct tg3 *tp, char *str)
16248 {
16249         if (tg3_flag(tp, PCI_EXPRESS)) {
16250                 strcpy(str, "PCI Express");
16251                 return str;
16252         } else if (tg3_flag(tp, PCIX_MODE)) {
16253                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
16254
16255                 strcpy(str, "PCIX:");
16256
16257                 if ((clock_ctrl == 7) ||
16258                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
16259                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
16260                         strcat(str, "133MHz");
16261                 else if (clock_ctrl == 0)
16262                         strcat(str, "33MHz");
16263                 else if (clock_ctrl == 2)
16264                         strcat(str, "50MHz");
16265                 else if (clock_ctrl == 4)
16266                         strcat(str, "66MHz");
16267                 else if (clock_ctrl == 6)
16268                         strcat(str, "100MHz");
16269         } else {
16270                 strcpy(str, "PCI:");
16271                 if (tg3_flag(tp, PCI_HIGH_SPEED))
16272                         strcat(str, "66MHz");
16273                 else
16274                         strcat(str, "33MHz");
16275         }
16276         if (tg3_flag(tp, PCI_32BIT))
16277                 strcat(str, ":32-bit");
16278         else
16279                 strcat(str, ":64-bit");
16280         return str;
16281 }
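/* Example strings produced by tg3_bus_string(): "PCI Express" (the PCIe
 * branch returns early, so it carries no width suffix),
 * "PCIX:133MHz:64-bit", and "PCI:66MHz:32-bit".  Callers must supply a
 * buffer sized for the longest form, e.g. the str[40] in tg3_init_one()
 * below.
 */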
16282
16283 static void tg3_init_coal(struct tg3 *tp)
16284 {
16285         struct ethtool_coalesce *ec = &tp->coal;
16286
16287         memset(ec, 0, sizeof(*ec));
16288         ec->cmd = ETHTOOL_GCOALESCE;
16289         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16290         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16291         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16292         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16293         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16294         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16295         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16296         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16297         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16298
16299         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16300                                  HOSTCC_MODE_CLRTICK_TXBD)) {
16301                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16302                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16303                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16304                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16305         }
16306
16307         if (tg3_flag(tp, 5705_PLUS)) {
16308                 ec->rx_coalesce_usecs_irq = 0;
16309                 ec->tx_coalesce_usecs_irq = 0;
16310                 ec->stats_block_coalesce_usecs = 0;
16311         }
16312 }
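/* These defaults are what userland sees from the ethtool coalesce query
 * ("ethtool -c ethX"), assuming the usual wiring of tp->coal through
 * tg3_ethtool_ops: rx-usecs/tx-usecs are seeded from the LOW_*COL_TICKS
 * values above, with the per-IRQ and stats fields zeroed on 5705+
 * parts.
 */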
16313
16314 static int tg3_init_one(struct pci_dev *pdev,
16315                                   const struct pci_device_id *ent)
16316 {
16317         struct net_device *dev;
16318         struct tg3 *tp;
16319         int i, err, pm_cap;
16320         u32 sndmbx, rcvmbx, intmbx;
16321         char str[40];
16322         u64 dma_mask, persist_dma_mask;
16323         netdev_features_t features = 0;
16324
16325         printk_once(KERN_INFO "%s\n", version);
16326
16327         err = pci_enable_device(pdev);
16328         if (err) {
16329                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16330                 return err;
16331         }
16332
16333         err = pci_request_regions(pdev, DRV_MODULE_NAME);
16334         if (err) {
16335                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16336                 goto err_out_disable_pdev;
16337         }
16338
16339         pci_set_master(pdev);
16340
16341         /* Find power-management capability. */
16342         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16343         if (pm_cap == 0) {
16344                 dev_err(&pdev->dev,
16345                         "Cannot find Power Management capability, aborting\n");
16346                 err = -EIO;
16347                 goto err_out_free_res;
16348         }
16349
16350         err = pci_set_power_state(pdev, PCI_D0);
16351         if (err) {
16352                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16353                 goto err_out_free_res;
16354         }
16355
16356         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16357         if (!dev) {
16358                 err = -ENOMEM;
16359                 goto err_out_power_down;
16360         }
16361
16362         SET_NETDEV_DEV(dev, &pdev->dev);
16363
16364         tp = netdev_priv(dev);
16365         tp->pdev = pdev;
16366         tp->dev = dev;
16367         tp->pm_cap = pm_cap;
16368         tp->rx_mode = TG3_DEF_RX_MODE;
16369         tp->tx_mode = TG3_DEF_TX_MODE;
16370
16371         if (tg3_debug > 0)
16372                 tp->msg_enable = tg3_debug;
16373         else
16374                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16375
16376         /* The word/byte swap controls here govern register access byte
16377          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
16378          * setting below.
16379          */
16380         tp->misc_host_ctrl =
16381                 MISC_HOST_CTRL_MASK_PCI_INT |
16382                 MISC_HOST_CTRL_WORD_SWAP |
16383                 MISC_HOST_CTRL_INDIR_ACCESS |
16384                 MISC_HOST_CTRL_PCISTATE_RW;
16385
16386         /* The NONFRM (non-frame) byte/word swap controls take effect
16387          * on descriptor entries, i.e. anything which isn't packet data.
16388          *
16389          * The StrongARM chips on the board (one for tx, one for rx)
16390          * are running in big-endian mode.
16391          */
16392         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16393                         GRC_MODE_WSWAP_NONFRM_DATA);
16394 #ifdef __BIG_ENDIAN
16395         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16396 #endif
16397         spin_lock_init(&tp->lock);
16398         spin_lock_init(&tp->indirect_lock);
16399         INIT_WORK(&tp->reset_task, tg3_reset_task);
16400
16401         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16402         if (!tp->regs) {
16403                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16404                 err = -ENOMEM;
16405                 goto err_out_free_dev;
16406         }
16407
16408         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16409             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16410             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16411             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16412             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16413             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16414             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16415             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16416             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
16417                 tg3_flag_set(tp, ENABLE_APE);
16418                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16419                 if (!tp->aperegs) {
16420                         dev_err(&pdev->dev,
16421                                 "Cannot map APE registers, aborting\n");
16422                         err = -ENOMEM;
16423                         goto err_out_iounmap;
16424                 }
16425         }
16426
16427         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16428         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16429
16430         dev->ethtool_ops = &tg3_ethtool_ops;
16431         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16432         dev->netdev_ops = &tg3_netdev_ops;
16433         dev->irq = pdev->irq;
16434
16435         err = tg3_get_invariants(tp, ent);
16436         if (err) {
16437                 dev_err(&pdev->dev,
16438                         "Problem fetching invariants of chip, aborting\n");
16439                 goto err_out_apeunmap;
16440         }
16441
16442         /* The EPB bridge inside the 5714, 5715, and 5780, and any device
16443          * behind the EPB, cannot support DMA addresses wider than 40 bits.
16444          * On 64-bit systems with an IOMMU, use a 40-bit dma_mask.
16445          * On 64-bit systems without an IOMMU, use a 64-bit dma_mask and
16446          * do the DMA address check in tg3_start_xmit().
16447          */
16448         if (tg3_flag(tp, IS_5788))
16449                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16450         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16451                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16452 #ifdef CONFIG_HIGHMEM
16453                 dma_mask = DMA_BIT_MASK(64);
16454 #endif
16455         } else
16456                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
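        /* In short: 5788 parts are limited to 32-bit DMA; parts with the
         * 40-bit DMA bug keep a 40-bit persistent (coherent) mask but may
         * attempt 64-bit streaming DMA on HIGHMEM kernels, relying on the
         * address check in tg3_start_xmit(); everything else gets full
         * 64-bit masks.
         */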
16457
16458         /* Configure DMA attributes. */
16459         if (dma_mask > DMA_BIT_MASK(32)) {
16460                 err = pci_set_dma_mask(pdev, dma_mask);
16461                 if (!err) {
16462                         features |= NETIF_F_HIGHDMA;
16463                         err = pci_set_consistent_dma_mask(pdev,
16464                                                           persist_dma_mask);
16465                         if (err < 0) {
16466                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16467                                         "DMA for consistent allocations\n");
16468                                 goto err_out_apeunmap;
16469                         }
16470                 }
16471         }
16472         if (err || dma_mask == DMA_BIT_MASK(32)) {
16473                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16474                 if (err) {
16475                         dev_err(&pdev->dev,
16476                                 "No usable DMA configuration, aborting\n");
16477                         goto err_out_apeunmap;
16478                 }
16479         }
16480
16481         tg3_init_bufmgr_config(tp);
16482
16483         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16484
16485         /* 5700 B0 chips do not support checksumming correctly due
16486          * to hardware bugs.
16487          */
16488         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16489                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16490
16491                 if (tg3_flag(tp, 5755_PLUS))
16492                         features |= NETIF_F_IPV6_CSUM;
16493         }
16494
16495         /* TSO is on by default on chips that support hardware TSO.
16496          * Firmware TSO on older chips gives lower performance, so it
16497          * is off by default, but can be enabled using ethtool.
16498          */
16499         if ((tg3_flag(tp, HW_TSO_1) ||
16500              tg3_flag(tp, HW_TSO_2) ||
16501              tg3_flag(tp, HW_TSO_3)) &&
16502             (features & NETIF_F_IP_CSUM))
16503                 features |= NETIF_F_TSO;
16504         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16505                 if (features & NETIF_F_IPV6_CSUM)
16506                         features |= NETIF_F_TSO6;
16507                 if (tg3_flag(tp, HW_TSO_3) ||
16508                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16509                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16510                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
16511                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
16512                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
16513                         features |= NETIF_F_TSO_ECN;
16514         }
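        /* HW_TSO_1/2/3 mark successive generations of hardware TSO: all of
         * them handle IPv4 TSO, TSO6 requires at least the second
         * generation, and TSO_ECN is advertised only for HW_TSO_3 parts or
         * the specific ASIC revs listed above.
         */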
16515
16516         dev->features |= features;
16517         dev->vlan_features |= features;
16518
16519         /*
16520          * Add loopback capability only for the subset of devices that
16521          * support MAC loopback.  Eventually this needs to be enhanced to
16522          * allow INT-PHY loopback for the remaining devices.
16523          */
16524         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16525             !tg3_flag(tp, CPMU_PRESENT))
16526                 /* Add the loopback capability */
16527                 features |= NETIF_F_LOOPBACK;
16528
16529         dev->hw_features |= features;
16530
16531         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
16532             !tg3_flag(tp, TSO_CAPABLE) &&
16533             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16534                 tg3_flag_set(tp, MAX_RXPEND_64);
16535                 tp->rx_pending = 63;
16536         }
16537
16538         err = tg3_get_device_address(tp);
16539         if (err) {
16540                 dev_err(&pdev->dev,
16541                         "Could not obtain valid ethernet address, aborting\n");
16542                 goto err_out_apeunmap;
16543         }
16544
16545         /*
16546          * Reset the chip in case the UNDI or EFI driver did not shut it
16547          * down; the DMA self test would enable WDMAC and we would see
16548          * (spurious) pending DMA on the PCI bus at that point.
16549          */
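        /* The MEMARB_MODE write below presumably matters because the memory
         * arbiter must be running before tg3_halt() can reach NIC-local
         * memory (an inference from the code, not from documentation).
         */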
16550         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16551             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16552                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16553                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16554         }
16555
16556         err = tg3_test_dma(tp);
16557         if (err) {
16558                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16559                 goto err_out_apeunmap;
16560         }
16561
16562         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16563         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16564         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16565         for (i = 0; i < tp->irq_max; i++) {
16566                 struct tg3_napi *tnapi = &tp->napi[i];
16567
16568                 tnapi->tp = tp;
16569                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16570
16571                 tnapi->int_mbox = intmbx;
16572                 if (i <= 4)
16573                         intmbx += 0x8;
16574                 else
16575                         intmbx += 0x4;
16576
16577                 tnapi->consmbox = rcvmbx;
16578                 tnapi->prodmbox = sndmbx;
16579
16580                 if (i)
16581                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16582                 else
16583                         tnapi->coal_now = HOSTCC_MODE_NOW;
16584
16585                 if (!tg3_flag(tp, SUPPORT_MSIX))
16586                         break;
16587
16588                 /*
16589                  * If we support MSI-X, we'll be using RSS.  With RSS, the
16590                  * first vector handles only link interrupts and the
16591                  * remaining vectors handle rx and tx interrupts.  Reuse the
16592                  * mailbox values for the next iteration.  The values set up
16593                  * above are still useful for the single-vectored mode.
16594                  */
16595                 if (!i)
16596                         continue;
16597
16598                 rcvmbx += 0x8;
16599
16600                 if (sndmbx & 0x4)
16601                         sndmbx -= 0x4;
16602                 else
16603                         sndmbx += 0xc;
16604         }
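        /* For reference, the stepping above (derived from the arithmetic,
         * with TG3_64BIT_REG_LOW assumed to be 0x4): interrupt mailboxes
         * advance a full 64-bit slot (0x8) for the first five vectors and
         * then pack into 32-bit halves (0x4); receive-return mailboxes
         * advance 0x8 per vector; and the +0xc/-0x4 dance on sndmbx appears
         * to walk both 32-bit halves of consecutive 64-bit send mailbox
         * slots, packing two producer mailboxes per slot.
         */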
16605
16606         tg3_init_coal(tp);
16607
16608         pci_set_drvdata(pdev, dev);
16609
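        /* The 5719 and 5720 MACs include the timestamping hardware needed
         * for PTP, so mark those parts PTP-capable.
         */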
16610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
16611             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
16612                 tg3_flag_set(tp, PTP_CAPABLE);
16613
16614         if (tg3_flag(tp, 5717_PLUS)) {
16615                 /* Resume a low-power mode */
16616                 tg3_frob_aux_power(tp, false);
16617         }
16618
16619         tg3_timer_init(tp);
16620
16621         err = register_netdev(dev);
16622         if (err) {
16623                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16624                 goto err_out_apeunmap;
16625         }
16626
16627         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16628                     tp->board_part_number,
16629                     tp->pci_chip_rev_id,
16630                     tg3_bus_string(tp, str),
16631                     dev->dev_addr);
16632
16633         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16634                 struct phy_device *phydev;
16635                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16636                 netdev_info(dev,
16637                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16638                             phydev->drv->name, dev_name(&phydev->dev));
16639         } else {
16640                 char *ethtype;
16641
16642                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16643                         ethtype = "10/100Base-TX";
16644                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16645                         ethtype = "1000Base-SX";
16646                 else
16647                         ethtype = "10/100/1000Base-T";
16648
16649                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16650                             "(WireSpeed[%d], EEE[%d])\n",
16651                             tg3_phy_string(tp), ethtype,
16652                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16653                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16654         }
16655
16656         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16657                     (dev->features & NETIF_F_RXCSUM) != 0,
16658                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
16659                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16660                     tg3_flag(tp, ENABLE_ASF) != 0,
16661                     tg3_flag(tp, TSO_CAPABLE) != 0);
16662         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16663                     tp->dma_rwctrl,
16664                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16665                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16666
16667         pci_save_state(pdev);
16668
16669         return 0;
16670
16671 err_out_apeunmap:
16672         if (tp->aperegs) {
16673                 iounmap(tp->aperegs);
16674                 tp->aperegs = NULL;
16675         }
16676
16677 err_out_iounmap:
16678         if (tp->regs) {
16679                 iounmap(tp->regs);
16680                 tp->regs = NULL;
16681         }
16682
16683 err_out_free_dev:
16684         free_netdev(dev);
16685
16686 err_out_power_down:
16687         pci_set_power_state(pdev, PCI_D3hot);
16688
16689 err_out_free_res:
16690         pci_release_regions(pdev);
16691
16692 err_out_disable_pdev:
16693         pci_disable_device(pdev);
16694         pci_set_drvdata(pdev, NULL);
16695         return err;
16696 }
16697
16698 static void tg3_remove_one(struct pci_dev *pdev)
16699 {
16700         struct net_device *dev = pci_get_drvdata(pdev);
16701
16702         if (dev) {
16703                 struct tg3 *tp = netdev_priv(dev);
16704
16705                 release_firmware(tp->fw);
16706
16707                 tg3_reset_task_cancel(tp);
16708
16709                 if (tg3_flag(tp, USE_PHYLIB)) {
16710                         tg3_phy_fini(tp);
16711                         tg3_mdio_fini(tp);
16712                 }
16713
16714                 unregister_netdev(dev);
16715                 if (tp->aperegs) {
16716                         iounmap(tp->aperegs);
16717                         tp->aperegs = NULL;
16718                 }
16719                 if (tp->regs) {
16720                         iounmap(tp->regs);
16721                         tp->regs = NULL;
16722                 }
16723                 free_netdev(dev);
16724                 pci_release_regions(pdev);
16725                 pci_disable_device(pdev);
16726                 pci_set_drvdata(pdev, NULL);
16727         }
16728 }
16729
16730 #ifdef CONFIG_PM_SLEEP
16731 static int tg3_suspend(struct device *device)
16732 {
16733         struct pci_dev *pdev = to_pci_dev(device);
16734         struct net_device *dev = pci_get_drvdata(pdev);
16735         struct tg3 *tp = netdev_priv(dev);
16736         int err;
16737
16738         if (!netif_running(dev))
16739                 return 0;
16740
16741         tg3_reset_task_cancel(tp);
16742         tg3_phy_stop(tp);
16743         tg3_netif_stop(tp);
16744
16745         tg3_timer_stop(tp);
16746
16747         tg3_full_lock(tp, 1);
16748         tg3_disable_ints(tp);
16749         tg3_full_unlock(tp);
16750
16751         netif_device_detach(dev);
16752
16753         tg3_full_lock(tp, 0);
16754         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16755         tg3_flag_clear(tp, INIT_COMPLETE);
16756         tg3_full_unlock(tp);
16757
16758         err = tg3_power_down_prepare(tp);
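        /* If the chip cannot be prepared for power-down, restart the
         * hardware and reattach the interface so a failed suspend leaves
         * the device usable.
         */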
16759         if (err) {
16760                 int err2;
16761
16762                 tg3_full_lock(tp, 0);
16763
16764                 tg3_flag_set(tp, INIT_COMPLETE);
16765                 err2 = tg3_restart_hw(tp, 1);
16766                 if (err2)
16767                         goto out;
16768
16769                 tg3_timer_start(tp);
16770
16771                 netif_device_attach(dev);
16772                 tg3_netif_start(tp);
16773
16774 out:
16775                 tg3_full_unlock(tp);
16776
16777                 if (!err2)
16778                         tg3_phy_start(tp);
16779         }
16780
16781         return err;
16782 }
16783
16784 static int tg3_resume(struct device *device)
16785 {
16786         struct pci_dev *pdev = to_pci_dev(device);
16787         struct net_device *dev = pci_get_drvdata(pdev);
16788         struct tg3 *tp = netdev_priv(dev);
16789         int err;
16790
16791         if (!netif_running(dev))
16792                 return 0;
16793
16794         netif_device_attach(dev);
16795
16796         tg3_full_lock(tp, 0);
16797
16798         tg3_flag_set(tp, INIT_COMPLETE);
16799         err = tg3_restart_hw(tp, 1);
16800         if (err)
16801                 goto out;
16802
16803         tg3_timer_start(tp);
16804
16805         tg3_netif_start(tp);
16806
16807 out:
16808         tg3_full_unlock(tp);
16809
16810         if (!err)
16811                 tg3_phy_start(tp);
16812
16813         return err;
16814 }
16815
16816 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16817 #define TG3_PM_OPS (&tg3_pm_ops)
16818
16819 #else
16820
16821 #define TG3_PM_OPS NULL
16822
16823 #endif /* CONFIG_PM_SLEEP */
16824
16825 /**
16826  * tg3_io_error_detected - called when a PCI error is detected
16827  * @pdev: Pointer to PCI device
16828  * @state: The current PCI connection state
16829  *
16830  * This function is called after a PCI bus error affecting
16831  * this device has been detected.
16832  */
16833 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16834                                               pci_channel_state_t state)
16835 {
16836         struct net_device *netdev = pci_get_drvdata(pdev);
16837         struct tg3 *tp = netdev_priv(netdev);
16838         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16839
16840         netdev_info(netdev, "PCI I/O error detected\n");
16841
16842         rtnl_lock();
16843
16844         if (!netif_running(netdev))
16845                 goto done;
16846
16847         tg3_phy_stop(tp);
16848
16849         tg3_netif_stop(tp);
16850
16851         tg3_timer_stop(tp);
16852
16853         /* Make sure that the reset task doesn't run */
16854         tg3_reset_task_cancel(tp);
16855
16856         netif_device_detach(netdev);
16857
16858         /* Clean up software state, even if MMIO is blocked */
16859         tg3_full_lock(tp, 0);
16860         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16861         tg3_full_unlock(tp);
16862
16863 done:
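        /* A permanently failed channel cannot be rescued by a slot reset;
         * otherwise disable the device until tg3_io_slot_reset()
         * re-enables it.
         */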
16864         if (state == pci_channel_io_perm_failure)
16865                 err = PCI_ERS_RESULT_DISCONNECT;
16866         else
16867                 pci_disable_device(pdev);
16868
16869         rtnl_unlock();
16870
16871         return err;
16872 }
16873
16874 /**
16875  * tg3_io_slot_reset - called after the PCI bus has been reset.
16876  * @pdev: Pointer to PCI device
16877  *
16878  * Restart the card from scratch, as if from a cold boot.
16879  * At this point, the card has experienced a hard reset,
16880  * followed by fixups by the BIOS, and has its config space
16881  * set up identically to what it was at cold boot.
16882  */
16883 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16884 {
16885         struct net_device *netdev = pci_get_drvdata(pdev);
16886         struct tg3 *tp = netdev_priv(netdev);
16887         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16888         int err;
16889
16890         rtnl_lock();
16891
16892         if (pci_enable_device(pdev)) {
16893                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16894                 goto done;
16895         }
16896
16897         pci_set_master(pdev);
16898         pci_restore_state(pdev);
16899         pci_save_state(pdev);
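        /* Restore the config space saved by pci_save_state() at probe or
         * suspend time, then re-save it so a later recovery pass starts
         * from this restored state.
         */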
16900
16901         if (!netif_running(netdev)) {
16902                 rc = PCI_ERS_RESULT_RECOVERED;
16903                 goto done;
16904         }
16905
16906         err = tg3_power_up(tp);
16907         if (err)
16908                 goto done;
16909
16910         rc = PCI_ERS_RESULT_RECOVERED;
16911
16912 done:
16913         rtnl_unlock();
16914
16915         return rc;
16916 }
16917
16918 /**
16919  * tg3_io_resume - called when traffic can start flowing again.
16920  * @pdev: Pointer to PCI device
16921  *
16922  * This callback is called when the error recovery driver tells
16923  * us that it's OK to resume normal operation.
16924  */
16925 static void tg3_io_resume(struct pci_dev *pdev)
16926 {
16927         struct net_device *netdev = pci_get_drvdata(pdev);
16928         struct tg3 *tp = netdev_priv(netdev);
16929         int err;
16930
16931         rtnl_lock();
16932
16933         if (!netif_running(netdev))
16934                 goto done;
16935
16936         tg3_full_lock(tp, 0);
16937         tg3_flag_set(tp, INIT_COMPLETE);
16938         err = tg3_restart_hw(tp, 1);
16939         if (err) {
16940                 tg3_full_unlock(tp);
16941                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16942                 goto done;
16943         }
16944
16945         netif_device_attach(netdev);
16946
16947         tg3_timer_start(tp);
16948
16949         tg3_netif_start(tp);
16950
16951         tg3_full_unlock(tp);
16952
16953         tg3_phy_start(tp);
16954
16955 done:
16956         rtnl_unlock();
16957 }
16958
16959 static const struct pci_error_handlers tg3_err_handler = {
16960         .error_detected = tg3_io_error_detected,
16961         .slot_reset     = tg3_io_slot_reset,
16962         .resume         = tg3_io_resume
16963 };
16964
16965 static struct pci_driver tg3_driver = {
16966         .name           = DRV_MODULE_NAME,
16967         .id_table       = tg3_pci_tbl,
16968         .probe          = tg3_init_one,
16969         .remove         = tg3_remove_one,
16970         .err_handler    = &tg3_err_handler,
16971         .driver.pm      = TG3_PM_OPS,
16972 };
16973
16974 static int __init tg3_init(void)
16975 {
16976         return pci_register_driver(&tg3_driver);
16977 }
16978
16979 static void __exit tg3_cleanup(void)
16980 {
16981         pci_unregister_driver(&tg3_driver);
16982 }
16983
16984 module_init(tg3_init);
16985 module_exit(tg3_cleanup);