2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
51 #include <net/checksum.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
62 #include <asm/idprom.h>
71 /* Functions & macros to verify TG3_FLAGS types */
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
75 return test_bit(flag, bits);
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
85 clear_bit(flag, bits);
88 #define tg3_flag(tp, flag) \
89 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag) \
91 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag) \
93 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define DRV_MODULE_NAME "tg3"
97 #define TG3_MIN_NUM 132
98 #define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE "May 21, 2013"
102 #define RESET_KIND_SHUTDOWN 0
103 #define RESET_KIND_INIT 1
104 #define RESET_KIND_SUSPEND 2
106 #define TG3_DEF_RX_MODE 0
107 #define TG3_DEF_TX_MODE 0
108 #define TG3_DEF_MSG_ENABLE \
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
120 /* length of time before we decide the hardware is borked,
121 * and dev->tx_timeout() should be called to fix the problem
124 #define TG3_TX_TIMEOUT (5 * HZ)
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU 60
128 #define TG3_MAX_MTU(tp) \
129 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132 * You can't change the ring sizes, but you can change where you place
133 * them in the NIC onboard memory.
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING 200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
144 /* Do not place this n-ring entries value into the tp struct itself,
145 * we really want to expose these constants to GCC so that modulo et
146 * al. operations are done with shifts and masks instead of with
147 * hw multiply/modulo instructions. Another solution would be to
148 * replace things like '% foo' with '& (foo - 1)'.
151 #define TG3_TX_RING_SIZE 512
152 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
162 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
164 #define TG3_DMA_BYTE_ENAB 64
166 #define TG3_RX_STD_DMA_SZ 1536
167 #define TG3_RX_JMB_DMA_SZ 9046
169 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
171 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181 * that are at least dword aligned when used in PCIX mode. The driver
182 * works around this bug by double copying the packet. This workaround
183 * is built into the normal double copy length check for efficiency.
185 * However, the double copy is only necessary on those architectures
186 * where unaligned memory accesses are inefficient. For those architectures
187 * where unaligned memory accesses incur little penalty, we can reintegrate
188 * the 5701 in the normal rx path. Doing so saves a device structure
189 * dereference by hardcoding the double copy threshold in place.
191 #define TG3_RX_COPY_THRESHOLD 256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
195 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
201 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K 2048
207 #define TG3_TX_BD_DMA_MAX_4K 4096
209 #define TG3_RAW_IP_ALIGN 2
211 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
212 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
214 #define FIRMWARE_TG3 "tigon/tg3.bin"
215 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
219 static char version[] =
220 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
230 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 TG3_DRV_DATA_FLAG_5705_10_100},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 TG3_DRV_DATA_FLAG_5705_10_100},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 PCI_VENDOR_ID_LENOVO,
288 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
353 static const struct {
354 const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
358 { "rx_ucast_packets" },
359 { "rx_mcast_packets" },
360 { "rx_bcast_packets" },
362 { "rx_align_errors" },
363 { "rx_xon_pause_rcvd" },
364 { "rx_xoff_pause_rcvd" },
365 { "rx_mac_ctrl_rcvd" },
366 { "rx_xoff_entered" },
367 { "rx_frame_too_long_errors" },
369 { "rx_undersize_packets" },
370 { "rx_in_length_errors" },
371 { "rx_out_length_errors" },
372 { "rx_64_or_less_octet_packets" },
373 { "rx_65_to_127_octet_packets" },
374 { "rx_128_to_255_octet_packets" },
375 { "rx_256_to_511_octet_packets" },
376 { "rx_512_to_1023_octet_packets" },
377 { "rx_1024_to_1522_octet_packets" },
378 { "rx_1523_to_2047_octet_packets" },
379 { "rx_2048_to_4095_octet_packets" },
380 { "rx_4096_to_8191_octet_packets" },
381 { "rx_8192_to_9022_octet_packets" },
388 { "tx_flow_control" },
390 { "tx_single_collisions" },
391 { "tx_mult_collisions" },
393 { "tx_excessive_collisions" },
394 { "tx_late_collisions" },
395 { "tx_collide_2times" },
396 { "tx_collide_3times" },
397 { "tx_collide_4times" },
398 { "tx_collide_5times" },
399 { "tx_collide_6times" },
400 { "tx_collide_7times" },
401 { "tx_collide_8times" },
402 { "tx_collide_9times" },
403 { "tx_collide_10times" },
404 { "tx_collide_11times" },
405 { "tx_collide_12times" },
406 { "tx_collide_13times" },
407 { "tx_collide_14times" },
408 { "tx_collide_15times" },
409 { "tx_ucast_packets" },
410 { "tx_mcast_packets" },
411 { "tx_bcast_packets" },
412 { "tx_carrier_sense_errors" },
416 { "dma_writeq_full" },
417 { "dma_write_prioq_full" },
421 { "rx_threshold_hit" },
423 { "dma_readq_full" },
424 { "dma_read_prioq_full" },
425 { "tx_comp_queue_full" },
427 { "ring_set_send_prod_index" },
428 { "ring_status_update" },
430 { "nic_avoided_irqs" },
431 { "nic_tx_threshold_hit" },
433 { "mbuf_lwm_thresh_hit" },
436 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST 0
438 #define TG3_LINK_TEST 1
439 #define TG3_REGISTER_TEST 2
440 #define TG3_MEMORY_TEST 3
441 #define TG3_MAC_LOOPB_TEST 4
442 #define TG3_PHY_LOOPB_TEST 5
443 #define TG3_EXT_LOOPB_TEST 6
444 #define TG3_INTERRUPT_TEST 7
447 static const struct {
448 const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 [TG3_NVRAM_TEST] = { "nvram test (online) " },
451 [TG3_LINK_TEST] = { "link test (online) " },
452 [TG3_REGISTER_TEST] = { "register test (offline)" },
453 [TG3_MEMORY_TEST] = { "memory test (offline)" },
454 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
455 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
456 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
457 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
460 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
465 writel(val, tp->regs + off);
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
470 return readl(tp->regs + off);
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
475 writel(val, tp->aperegs + off);
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
480 return readl(tp->aperegs + off);
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
487 spin_lock_irqsave(&tp->indirect_lock, flags);
488 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490 spin_unlock_irqrestore(&tp->indirect_lock, flags);
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
495 writel(val, tp->regs + off);
496 readl(tp->regs + off);
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
504 spin_lock_irqsave(&tp->indirect_lock, flags);
505 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507 spin_unlock_irqrestore(&tp->indirect_lock, flags);
511 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
515 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
516 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
517 TG3_64BIT_REG_LOW, val);
520 if (off == TG3_RX_STD_PROD_IDX_REG) {
521 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
522 TG3_64BIT_REG_LOW, val);
526 spin_lock_irqsave(&tp->indirect_lock, flags);
527 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
528 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
529 spin_unlock_irqrestore(&tp->indirect_lock, flags);
531 /* In indirect mode when disabling interrupts, we also need
532 * to clear the interrupt bit in the GRC local ctrl register.
534 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
536 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
537 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
546 spin_lock_irqsave(&tp->indirect_lock, flags);
547 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549 spin_unlock_irqrestore(&tp->indirect_lock, flags);
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554 * where it is unsafe to read back the register without some delay.
555 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
560 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561 /* Non-posted methods */
562 tp->write32(tp, off, val);
565 tg3_write32(tp, off, val);
570 /* Wait again after the read for the posted method to guarantee that
571 * the wait time is met.
577 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
579 tp->write32_mbox(tp, off, val);
580 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
581 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
582 !tg3_flag(tp, ICH_WORKAROUND)))
583 tp->read32_mbox(tp, off);
586 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
588 void __iomem *mbox = tp->regs + off;
590 if (tg3_flag(tp, TXD_MBOX_HWBUG))
592 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
593 tg3_flag(tp, FLUSH_POSTED_WRITES))
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
599 return readl(tp->regs + off + GRCMBOX_BASE);
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
604 writel(val, tp->regs + off + GRCMBOX_BASE);
607 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
613 #define tw32(reg, val) tp->write32(tp, reg, val)
614 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg) tp->read32(tp, reg)
618 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
622 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
623 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
626 spin_lock_irqsave(&tp->indirect_lock, flags);
627 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
628 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
629 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
631 /* Always leave this as zero. */
632 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
634 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
635 tw32_f(TG3PCI_MEM_WIN_DATA, val);
637 /* Always leave this as zero. */
638 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
640 spin_unlock_irqrestore(&tp->indirect_lock, flags);
643 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
647 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
648 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
653 spin_lock_irqsave(&tp->indirect_lock, flags);
654 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
655 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
656 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
658 /* Always leave this as zero. */
659 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
661 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
662 *val = tr32(TG3PCI_MEM_WIN_DATA);
664 /* Always leave this as zero. */
665 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
667 spin_unlock_irqrestore(&tp->indirect_lock, flags);
670 static void tg3_ape_lock_init(struct tg3 *tp)
675 if (tg3_asic_rev(tp) == ASIC_REV_5761)
676 regbase = TG3_APE_LOCK_GRANT;
678 regbase = TG3_APE_PER_LOCK_GRANT;
680 /* Make sure the driver hasn't any stale locks. */
681 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
683 case TG3_APE_LOCK_PHY0:
684 case TG3_APE_LOCK_PHY1:
685 case TG3_APE_LOCK_PHY2:
686 case TG3_APE_LOCK_PHY3:
687 bit = APE_LOCK_GRANT_DRIVER;
691 bit = APE_LOCK_GRANT_DRIVER;
693 bit = 1 << tp->pci_fn;
695 tg3_ape_write32(tp, regbase + 4 * i, bit);
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
704 u32 status, req, gnt, bit;
706 if (!tg3_flag(tp, ENABLE_APE))
710 case TG3_APE_LOCK_GPIO:
711 if (tg3_asic_rev(tp) == ASIC_REV_5761)
713 case TG3_APE_LOCK_GRC:
714 case TG3_APE_LOCK_MEM:
716 bit = APE_LOCK_REQ_DRIVER;
718 bit = 1 << tp->pci_fn;
720 case TG3_APE_LOCK_PHY0:
721 case TG3_APE_LOCK_PHY1:
722 case TG3_APE_LOCK_PHY2:
723 case TG3_APE_LOCK_PHY3:
724 bit = APE_LOCK_REQ_DRIVER;
730 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 req = TG3_APE_LOCK_REQ;
732 gnt = TG3_APE_LOCK_GRANT;
734 req = TG3_APE_PER_LOCK_REQ;
735 gnt = TG3_APE_PER_LOCK_GRANT;
740 tg3_ape_write32(tp, req + off, bit);
742 /* Wait for up to 1 millisecond to acquire lock. */
743 for (i = 0; i < 100; i++) {
744 status = tg3_ape_read32(tp, gnt + off);
751 /* Revoke the lock request. */
752 tg3_ape_write32(tp, gnt + off, bit);
759 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
763 if (!tg3_flag(tp, ENABLE_APE))
767 case TG3_APE_LOCK_GPIO:
768 if (tg3_asic_rev(tp) == ASIC_REV_5761)
770 case TG3_APE_LOCK_GRC:
771 case TG3_APE_LOCK_MEM:
773 bit = APE_LOCK_GRANT_DRIVER;
775 bit = 1 << tp->pci_fn;
777 case TG3_APE_LOCK_PHY0:
778 case TG3_APE_LOCK_PHY1:
779 case TG3_APE_LOCK_PHY2:
780 case TG3_APE_LOCK_PHY3:
781 bit = APE_LOCK_GRANT_DRIVER;
787 if (tg3_asic_rev(tp) == ASIC_REV_5761)
788 gnt = TG3_APE_LOCK_GRANT;
790 gnt = TG3_APE_PER_LOCK_GRANT;
792 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
795 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
800 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
803 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
804 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
807 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
810 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
813 return timeout_us ? 0 : -EBUSY;
816 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
820 for (i = 0; i < timeout_us / 10; i++) {
821 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
823 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
829 return i == timeout_us / 10;
832 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
836 u32 i, bufoff, msgoff, maxlen, apedata;
838 if (!tg3_flag(tp, APE_HAS_NCSI))
841 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
842 if (apedata != APE_SEG_SIG_MAGIC)
845 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
846 if (!(apedata & APE_FW_STATUS_READY))
849 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
851 msgoff = bufoff + 2 * sizeof(u32);
852 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
857 /* Cap xfer sizes to scratchpad limits. */
858 length = (len > maxlen) ? maxlen : len;
861 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
862 if (!(apedata & APE_FW_STATUS_READY))
865 /* Wait for up to 1 msec for APE to service previous event. */
866 err = tg3_ape_event_lock(tp, 1000);
870 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
871 APE_EVENT_STATUS_SCRTCHPD_READ |
872 APE_EVENT_STATUS_EVENT_PENDING;
873 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
875 tg3_ape_write32(tp, bufoff, base_off);
876 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
878 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
879 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
883 if (tg3_ape_wait_for_event(tp, 30000))
886 for (i = 0; length; i += 4, length -= 4) {
887 u32 val = tg3_ape_read32(tp, msgoff + i);
888 memcpy(data, &val, sizeof(u32));
896 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
901 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
902 if (apedata != APE_SEG_SIG_MAGIC)
905 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
906 if (!(apedata & APE_FW_STATUS_READY))
909 /* Wait for up to 1 millisecond for APE to service previous event. */
910 err = tg3_ape_event_lock(tp, 1000);
914 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
915 event | APE_EVENT_STATUS_EVENT_PENDING);
917 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
918 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
923 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
928 if (!tg3_flag(tp, ENABLE_APE))
932 case RESET_KIND_INIT:
933 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
934 APE_HOST_SEG_SIG_MAGIC);
935 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
936 APE_HOST_SEG_LEN_MAGIC);
937 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
938 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
939 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
940 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
941 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
942 APE_HOST_BEHAV_NO_PHYLOCK);
943 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
944 TG3_APE_HOST_DRVR_STATE_START);
946 event = APE_EVENT_STATUS_STATE_START;
948 case RESET_KIND_SHUTDOWN:
949 /* With the interface we are currently using,
950 * APE does not track driver state. Wiping
951 * out the HOST SEGMENT SIGNATURE forces
952 * the APE to assume OS absent status.
954 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
956 if (device_may_wakeup(&tp->pdev->dev) &&
957 tg3_flag(tp, WOL_ENABLE)) {
958 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
959 TG3_APE_HOST_WOL_SPEED_AUTO);
960 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
962 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
964 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
966 event = APE_EVENT_STATUS_STATE_UNLOAD;
972 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
974 tg3_ape_send_event(tp, event);
977 static void tg3_disable_ints(struct tg3 *tp)
981 tw32(TG3PCI_MISC_HOST_CTRL,
982 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
983 for (i = 0; i < tp->irq_max; i++)
984 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
987 static void tg3_enable_ints(struct tg3 *tp)
994 tw32(TG3PCI_MISC_HOST_CTRL,
995 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
997 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
998 for (i = 0; i < tp->irq_cnt; i++) {
999 struct tg3_napi *tnapi = &tp->napi[i];
1001 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1002 if (tg3_flag(tp, 1SHOT_MSI))
1003 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1005 tp->coal_now |= tnapi->coal_now;
1008 /* Force an initial interrupt */
1009 if (!tg3_flag(tp, TAGGED_STATUS) &&
1010 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1011 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1013 tw32(HOSTCC_MODE, tp->coal_now);
1015 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1018 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1020 struct tg3 *tp = tnapi->tp;
1021 struct tg3_hw_status *sblk = tnapi->hw_status;
1022 unsigned int work_exists = 0;
1024 /* check for phy events */
1025 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1026 if (sblk->status & SD_STATUS_LINK_CHG)
1030 /* check for TX work to do */
1031 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1034 /* check for RX work to do */
1035 if (tnapi->rx_rcb_prod_idx &&
1036 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1043 * similar to tg3_enable_ints, but it accurately determines whether there
1044 * is new work pending and can return without flushing the PIO write
1045 * which reenables interrupts
1047 static void tg3_int_reenable(struct tg3_napi *tnapi)
1049 struct tg3 *tp = tnapi->tp;
1051 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1054 /* When doing tagged status, this work check is unnecessary.
1055 * The last_tag we write above tells the chip which piece of
1056 * work we've completed.
1058 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1059 tw32(HOSTCC_MODE, tp->coalesce_mode |
1060 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1063 static void tg3_switch_clocks(struct tg3 *tp)
1066 u32 orig_clock_ctrl;
1068 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1071 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1073 orig_clock_ctrl = clock_ctrl;
1074 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1075 CLOCK_CTRL_CLKRUN_OENABLE |
1077 tp->pci_clock_ctrl = clock_ctrl;
1079 if (tg3_flag(tp, 5705_PLUS)) {
1080 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1081 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1082 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1084 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1085 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1087 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1089 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1090 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1093 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1096 #define PHY_BUSY_LOOPS 5000
1098 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1105 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1107 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1111 tg3_ape_lock(tp, tp->phy_ape_lock);
1115 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1116 MI_COM_PHY_ADDR_MASK);
1117 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1118 MI_COM_REG_ADDR_MASK);
1119 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1121 tw32_f(MAC_MI_COM, frame_val);
1123 loops = PHY_BUSY_LOOPS;
1124 while (loops != 0) {
1126 frame_val = tr32(MAC_MI_COM);
1128 if ((frame_val & MI_COM_BUSY) == 0) {
1130 frame_val = tr32(MAC_MI_COM);
1138 *val = frame_val & MI_COM_DATA_MASK;
1142 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1143 tw32_f(MAC_MI_MODE, tp->mi_mode);
1147 tg3_ape_unlock(tp, tp->phy_ape_lock);
1152 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1154 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1157 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1164 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1165 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1168 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1170 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1174 tg3_ape_lock(tp, tp->phy_ape_lock);
1176 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1177 MI_COM_PHY_ADDR_MASK);
1178 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1179 MI_COM_REG_ADDR_MASK);
1180 frame_val |= (val & MI_COM_DATA_MASK);
1181 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1183 tw32_f(MAC_MI_COM, frame_val);
1185 loops = PHY_BUSY_LOOPS;
1186 while (loops != 0) {
1188 frame_val = tr32(MAC_MI_COM);
1189 if ((frame_val & MI_COM_BUSY) == 0) {
1191 frame_val = tr32(MAC_MI_COM);
1201 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1202 tw32_f(MAC_MI_MODE, tp->mi_mode);
1206 tg3_ape_unlock(tp, tp->phy_ape_lock);
1211 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1213 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1216 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1220 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1224 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1228 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1229 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1233 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1239 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1243 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1247 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1251 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1252 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1256 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1262 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1266 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1268 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1273 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1277 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1279 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1284 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1288 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1289 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1290 MII_TG3_AUXCTL_SHDWSEL_MISC);
1292 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1297 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1299 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1300 set |= MII_TG3_AUXCTL_MISC_WREN;
1302 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1305 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1310 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1316 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1318 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1320 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1321 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1326 static int tg3_bmcr_reset(struct tg3 *tp)
1331 /* OK, reset it, and poll the BMCR_RESET bit until it
1332 * clears or we time out.
1334 phy_control = BMCR_RESET;
1335 err = tg3_writephy(tp, MII_BMCR, phy_control);
1341 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1345 if ((phy_control & BMCR_RESET) == 0) {
1357 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1359 struct tg3 *tp = bp->priv;
1362 spin_lock_bh(&tp->lock);
1364 if (tg3_readphy(tp, reg, &val))
1367 spin_unlock_bh(&tp->lock);
1372 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1374 struct tg3 *tp = bp->priv;
1377 spin_lock_bh(&tp->lock);
1379 if (tg3_writephy(tp, reg, val))
1382 spin_unlock_bh(&tp->lock);
1387 static int tg3_mdio_reset(struct mii_bus *bp)
1392 static void tg3_mdio_config_5785(struct tg3 *tp)
1395 struct phy_device *phydev;
1397 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1398 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1399 case PHY_ID_BCM50610:
1400 case PHY_ID_BCM50610M:
1401 val = MAC_PHYCFG2_50610_LED_MODES;
1403 case PHY_ID_BCMAC131:
1404 val = MAC_PHYCFG2_AC131_LED_MODES;
1406 case PHY_ID_RTL8211C:
1407 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1409 case PHY_ID_RTL8201E:
1410 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1416 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1417 tw32(MAC_PHYCFG2, val);
1419 val = tr32(MAC_PHYCFG1);
1420 val &= ~(MAC_PHYCFG1_RGMII_INT |
1421 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1422 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1423 tw32(MAC_PHYCFG1, val);
1428 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1429 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1430 MAC_PHYCFG2_FMODE_MASK_MASK |
1431 MAC_PHYCFG2_GMODE_MASK_MASK |
1432 MAC_PHYCFG2_ACT_MASK_MASK |
1433 MAC_PHYCFG2_QUAL_MASK_MASK |
1434 MAC_PHYCFG2_INBAND_ENABLE;
1436 tw32(MAC_PHYCFG2, val);
1438 val = tr32(MAC_PHYCFG1);
1439 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1440 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1441 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1442 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1443 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1444 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1445 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1447 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1448 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1449 tw32(MAC_PHYCFG1, val);
1451 val = tr32(MAC_EXT_RGMII_MODE);
1452 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1453 MAC_RGMII_MODE_RX_QUALITY |
1454 MAC_RGMII_MODE_RX_ACTIVITY |
1455 MAC_RGMII_MODE_RX_ENG_DET |
1456 MAC_RGMII_MODE_TX_ENABLE |
1457 MAC_RGMII_MODE_TX_LOWPWR |
1458 MAC_RGMII_MODE_TX_RESET);
1459 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1460 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1461 val |= MAC_RGMII_MODE_RX_INT_B |
1462 MAC_RGMII_MODE_RX_QUALITY |
1463 MAC_RGMII_MODE_RX_ACTIVITY |
1464 MAC_RGMII_MODE_RX_ENG_DET;
1465 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1466 val |= MAC_RGMII_MODE_TX_ENABLE |
1467 MAC_RGMII_MODE_TX_LOWPWR |
1468 MAC_RGMII_MODE_TX_RESET;
1470 tw32(MAC_EXT_RGMII_MODE, val);
1473 static void tg3_mdio_start(struct tg3 *tp)
1475 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1476 tw32_f(MAC_MI_MODE, tp->mi_mode);
1479 if (tg3_flag(tp, MDIOBUS_INITED) &&
1480 tg3_asic_rev(tp) == ASIC_REV_5785)
1481 tg3_mdio_config_5785(tp);
1484 static int tg3_mdio_init(struct tg3 *tp)
1488 struct phy_device *phydev;
1490 if (tg3_flag(tp, 5717_PLUS)) {
1493 tp->phy_addr = tp->pci_fn + 1;
1495 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1496 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1498 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1499 TG3_CPMU_PHY_STRAP_IS_SERDES;
1503 tp->phy_addr = TG3_PHY_MII_ADDR;
1507 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1510 tp->mdio_bus = mdiobus_alloc();
1511 if (tp->mdio_bus == NULL)
1514 tp->mdio_bus->name = "tg3 mdio bus";
1515 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1516 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1517 tp->mdio_bus->priv = tp;
1518 tp->mdio_bus->parent = &tp->pdev->dev;
1519 tp->mdio_bus->read = &tg3_mdio_read;
1520 tp->mdio_bus->write = &tg3_mdio_write;
1521 tp->mdio_bus->reset = &tg3_mdio_reset;
1522 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1523 tp->mdio_bus->irq = &tp->mdio_irq[0];
1525 for (i = 0; i < PHY_MAX_ADDR; i++)
1526 tp->mdio_bus->irq[i] = PHY_POLL;
1528 /* The bus registration will look for all the PHYs on the mdio bus.
1529 * Unfortunately, it does not ensure the PHY is powered up before
1530 * accessing the PHY ID registers. A chip reset is the
1531 * quickest way to bring the device back to an operational state..
1533 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1536 i = mdiobus_register(tp->mdio_bus);
1538 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1539 mdiobus_free(tp->mdio_bus);
1543 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1545 if (!phydev || !phydev->drv) {
1546 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1547 mdiobus_unregister(tp->mdio_bus);
1548 mdiobus_free(tp->mdio_bus);
1552 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1553 case PHY_ID_BCM57780:
1554 phydev->interface = PHY_INTERFACE_MODE_GMII;
1555 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1557 case PHY_ID_BCM50610:
1558 case PHY_ID_BCM50610M:
1559 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1560 PHY_BRCM_RX_REFCLK_UNUSED |
1561 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1562 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1563 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1564 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1565 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1566 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1567 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1568 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1570 case PHY_ID_RTL8211C:
1571 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1573 case PHY_ID_RTL8201E:
1574 case PHY_ID_BCMAC131:
1575 phydev->interface = PHY_INTERFACE_MODE_MII;
1576 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1577 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1581 tg3_flag_set(tp, MDIOBUS_INITED);
1583 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1584 tg3_mdio_config_5785(tp);
1589 static void tg3_mdio_fini(struct tg3 *tp)
1591 if (tg3_flag(tp, MDIOBUS_INITED)) {
1592 tg3_flag_clear(tp, MDIOBUS_INITED);
1593 mdiobus_unregister(tp->mdio_bus);
1594 mdiobus_free(tp->mdio_bus);
1598 /* tp->lock is held. */
1599 static inline void tg3_generate_fw_event(struct tg3 *tp)
1603 val = tr32(GRC_RX_CPU_EVENT);
1604 val |= GRC_RX_CPU_DRIVER_EVENT;
1605 tw32_f(GRC_RX_CPU_EVENT, val);
1607 tp->last_event_jiffies = jiffies;
1610 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1612 /* tp->lock is held. */
1613 static void tg3_wait_for_event_ack(struct tg3 *tp)
1616 unsigned int delay_cnt;
1619 /* If enough time has passed, no wait is necessary. */
1620 time_remain = (long)(tp->last_event_jiffies + 1 +
1621 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1623 if (time_remain < 0)
1626 /* Check if we can shorten the wait time. */
1627 delay_cnt = jiffies_to_usecs(time_remain);
1628 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1629 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1630 delay_cnt = (delay_cnt >> 3) + 1;
1632 for (i = 0; i < delay_cnt; i++) {
1633 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1639 /* tp->lock is held. */
1640 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1645 if (!tg3_readphy(tp, MII_BMCR, ®))
1647 if (!tg3_readphy(tp, MII_BMSR, ®))
1648 val |= (reg & 0xffff);
1652 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1654 if (!tg3_readphy(tp, MII_LPA, ®))
1655 val |= (reg & 0xffff);
1659 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1660 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1662 if (!tg3_readphy(tp, MII_STAT1000, ®))
1663 val |= (reg & 0xffff);
1667 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1674 /* tp->lock is held. */
1675 static void tg3_ump_link_report(struct tg3 *tp)
1679 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1682 tg3_phy_gather_ump_data(tp, data);
1684 tg3_wait_for_event_ack(tp);
1686 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1687 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1688 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1689 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1690 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1691 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1693 tg3_generate_fw_event(tp);
1696 /* tp->lock is held. */
1697 static void tg3_stop_fw(struct tg3 *tp)
1699 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1700 /* Wait for RX cpu to ACK the previous event. */
1701 tg3_wait_for_event_ack(tp);
1703 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1705 tg3_generate_fw_event(tp);
1707 /* Wait for RX cpu to ACK this event. */
1708 tg3_wait_for_event_ack(tp);
1712 /* tp->lock is held. */
1713 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1715 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1716 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1718 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1720 case RESET_KIND_INIT:
1721 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1725 case RESET_KIND_SHUTDOWN:
1726 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1730 case RESET_KIND_SUSPEND:
1731 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741 /* tp->lock is held. */
1742 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1744 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1746 case RESET_KIND_INIT:
1747 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1748 DRV_STATE_START_DONE);
1751 case RESET_KIND_SHUTDOWN:
1752 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1753 DRV_STATE_UNLOAD_DONE);
1762 /* tp->lock is held. */
1763 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1765 if (tg3_flag(tp, ENABLE_ASF)) {
1767 case RESET_KIND_INIT:
1768 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1772 case RESET_KIND_SHUTDOWN:
1773 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1777 case RESET_KIND_SUSPEND:
1778 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 static int tg3_poll_fw(struct tg3 *tp)
1793 if (tg3_flag(tp, NO_FWARE_REPORTED))
1796 if (tg3_flag(tp, IS_SSB_CORE)) {
1797 /* We don't use firmware. */
1801 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1802 /* Wait up to 20ms for init done. */
1803 for (i = 0; i < 200; i++) {
1804 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1811 /* Wait for firmware initialization to complete. */
1812 for (i = 0; i < 100000; i++) {
1813 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1814 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1819 /* Chip might not be fitted with firmware. Some Sun onboard
1820 * parts are configured like that. So don't signal the timeout
1821 * of the above loop as an error, but do report the lack of
1822 * running firmware once.
1824 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1825 tg3_flag_set(tp, NO_FWARE_REPORTED);
1827 netdev_info(tp->dev, "No firmware running\n");
1830 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1831 /* The 57765 A0 needs a little more
1832 * time to do some important work.
1840 static void tg3_link_report(struct tg3 *tp)
1842 if (!netif_carrier_ok(tp->dev)) {
1843 netif_info(tp, link, tp->dev, "Link is down\n");
1844 tg3_ump_link_report(tp);
1845 } else if (netif_msg_link(tp)) {
1846 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1847 (tp->link_config.active_speed == SPEED_1000 ?
1849 (tp->link_config.active_speed == SPEED_100 ?
1851 (tp->link_config.active_duplex == DUPLEX_FULL ?
1854 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1855 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1857 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1860 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1861 netdev_info(tp->dev, "EEE is %s\n",
1862 tp->setlpicnt ? "enabled" : "disabled");
1864 tg3_ump_link_report(tp);
1867 tp->link_up = netif_carrier_ok(tp->dev);
1870 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1874 if (adv & ADVERTISE_PAUSE_CAP) {
1875 flowctrl |= FLOW_CTRL_RX;
1876 if (!(adv & ADVERTISE_PAUSE_ASYM))
1877 flowctrl |= FLOW_CTRL_TX;
1878 } else if (adv & ADVERTISE_PAUSE_ASYM)
1879 flowctrl |= FLOW_CTRL_TX;
1884 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1888 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1889 miireg = ADVERTISE_1000XPAUSE;
1890 else if (flow_ctrl & FLOW_CTRL_TX)
1891 miireg = ADVERTISE_1000XPSE_ASYM;
1892 else if (flow_ctrl & FLOW_CTRL_RX)
1893 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1900 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1904 if (adv & ADVERTISE_1000XPAUSE) {
1905 flowctrl |= FLOW_CTRL_RX;
1906 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1907 flowctrl |= FLOW_CTRL_TX;
1908 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1909 flowctrl |= FLOW_CTRL_TX;
1914 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1918 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1919 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1920 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1921 if (lcladv & ADVERTISE_1000XPAUSE)
1923 if (rmtadv & ADVERTISE_1000XPAUSE)
1930 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1934 u32 old_rx_mode = tp->rx_mode;
1935 u32 old_tx_mode = tp->tx_mode;
1937 if (tg3_flag(tp, USE_PHYLIB))
1938 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1940 autoneg = tp->link_config.autoneg;
1942 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1943 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1944 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1946 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1948 flowctrl = tp->link_config.flowctrl;
1950 tp->link_config.active_flowctrl = flowctrl;
1952 if (flowctrl & FLOW_CTRL_RX)
1953 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1955 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1957 if (old_rx_mode != tp->rx_mode)
1958 tw32_f(MAC_RX_MODE, tp->rx_mode);
1960 if (flowctrl & FLOW_CTRL_TX)
1961 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1963 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1965 if (old_tx_mode != tp->tx_mode)
1966 tw32_f(MAC_TX_MODE, tp->tx_mode);
1969 static void tg3_adjust_link(struct net_device *dev)
1971 u8 oldflowctrl, linkmesg = 0;
1972 u32 mac_mode, lcl_adv, rmt_adv;
1973 struct tg3 *tp = netdev_priv(dev);
1974 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1976 spin_lock_bh(&tp->lock);
1978 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1979 MAC_MODE_HALF_DUPLEX);
1981 oldflowctrl = tp->link_config.active_flowctrl;
1987 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1988 mac_mode |= MAC_MODE_PORT_MODE_MII;
1989 else if (phydev->speed == SPEED_1000 ||
1990 tg3_asic_rev(tp) != ASIC_REV_5785)
1991 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1993 mac_mode |= MAC_MODE_PORT_MODE_MII;
1995 if (phydev->duplex == DUPLEX_HALF)
1996 mac_mode |= MAC_MODE_HALF_DUPLEX;
1998 lcl_adv = mii_advertise_flowctrl(
1999 tp->link_config.flowctrl);
2002 rmt_adv = LPA_PAUSE_CAP;
2003 if (phydev->asym_pause)
2004 rmt_adv |= LPA_PAUSE_ASYM;
2007 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2009 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2011 if (mac_mode != tp->mac_mode) {
2012 tp->mac_mode = mac_mode;
2013 tw32_f(MAC_MODE, tp->mac_mode);
2017 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2018 if (phydev->speed == SPEED_10)
2020 MAC_MI_STAT_10MBPS_MODE |
2021 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2023 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2026 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2027 tw32(MAC_TX_LENGTHS,
2028 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2029 (6 << TX_LENGTHS_IPG_SHIFT) |
2030 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2032 tw32(MAC_TX_LENGTHS,
2033 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2034 (6 << TX_LENGTHS_IPG_SHIFT) |
2035 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2037 if (phydev->link != tp->old_link ||
2038 phydev->speed != tp->link_config.active_speed ||
2039 phydev->duplex != tp->link_config.active_duplex ||
2040 oldflowctrl != tp->link_config.active_flowctrl)
2043 tp->old_link = phydev->link;
2044 tp->link_config.active_speed = phydev->speed;
2045 tp->link_config.active_duplex = phydev->duplex;
2047 spin_unlock_bh(&tp->lock);
2050 tg3_link_report(tp);
2053 static int tg3_phy_init(struct tg3 *tp)
2055 struct phy_device *phydev;
2057 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2060 /* Bring the PHY back to a known state. */
2063 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2065 /* Attach the MAC to the PHY. */
2066 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2067 tg3_adjust_link, phydev->interface);
2068 if (IS_ERR(phydev)) {
2069 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2070 return PTR_ERR(phydev);
2073 /* Mask with MAC supported features. */
2074 switch (phydev->interface) {
2075 case PHY_INTERFACE_MODE_GMII:
2076 case PHY_INTERFACE_MODE_RGMII:
2077 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2078 phydev->supported &= (PHY_GBIT_FEATURES |
2080 SUPPORTED_Asym_Pause);
2084 case PHY_INTERFACE_MODE_MII:
2085 phydev->supported &= (PHY_BASIC_FEATURES |
2087 SUPPORTED_Asym_Pause);
2090 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2094 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2096 phydev->advertising = phydev->supported;
2101 static void tg3_phy_start(struct tg3 *tp)
2103 struct phy_device *phydev;
2105 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2108 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2110 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2111 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2112 phydev->speed = tp->link_config.speed;
2113 phydev->duplex = tp->link_config.duplex;
2114 phydev->autoneg = tp->link_config.autoneg;
2115 phydev->advertising = tp->link_config.advertising;
2120 phy_start_aneg(phydev);
2123 static void tg3_phy_stop(struct tg3 *tp)
2125 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2128 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2131 static void tg3_phy_fini(struct tg3 *tp)
2133 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2134 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2135 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2139 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2144 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2147 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2148 /* Cannot do read-modify-write on 5401 */
2149 err = tg3_phy_auxctl_write(tp,
2150 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2151 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2156 err = tg3_phy_auxctl_read(tp,
2157 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2161 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2162 err = tg3_phy_auxctl_write(tp,
2163 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2169 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2173 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2176 tg3_writephy(tp, MII_TG3_FET_TEST,
2177 phytest | MII_TG3_FET_SHADOW_EN);
2178 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2180 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2182 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2183 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2185 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2189 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2193 if (!tg3_flag(tp, 5705_PLUS) ||
2194 (tg3_flag(tp, 5717_PLUS) &&
2195 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2198 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2199 tg3_phy_fet_toggle_apd(tp, enable);
2203 reg = MII_TG3_MISC_SHDW_WREN |
2204 MII_TG3_MISC_SHDW_SCR5_SEL |
2205 MII_TG3_MISC_SHDW_SCR5_LPED |
2206 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2207 MII_TG3_MISC_SHDW_SCR5_SDTL |
2208 MII_TG3_MISC_SHDW_SCR5_C125OE;
2209 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2210 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2212 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2215 reg = MII_TG3_MISC_SHDW_WREN |
2216 MII_TG3_MISC_SHDW_APD_SEL |
2217 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2219 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2221 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2224 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2228 if (!tg3_flag(tp, 5705_PLUS) ||
2229 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2232 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2235 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2236 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2238 tg3_writephy(tp, MII_TG3_FET_TEST,
2239 ephy | MII_TG3_FET_SHADOW_EN);
2240 if (!tg3_readphy(tp, reg, &phy)) {
2242 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2244 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2245 tg3_writephy(tp, reg, phy);
2247 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2252 ret = tg3_phy_auxctl_read(tp,
2253 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2256 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2258 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2259 tg3_phy_auxctl_write(tp,
2260 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2265 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2270 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2273 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2275 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2276 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2279 static void tg3_phy_apply_otp(struct tg3 *tp)
2288 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2291 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2292 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2293 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2295 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2296 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2297 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2299 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2300 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2301 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2303 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2304 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2306 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2307 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2309 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2310 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2311 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2313 tg3_phy_toggle_auxctl_smdsp(tp, false);
2316 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2319 struct ethtool_eee *dest = &tp->eee;
2321 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2327 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2330 /* Pull eee_active */
2331 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2332 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2333 dest->eee_active = 1;
2335 dest->eee_active = 0;
2337 /* Pull lp advertised settings */
2338 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2340 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2342 /* Pull advertised and eee_enabled settings */
2343 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2345 dest->eee_enabled = !!val;
2346 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2348 /* Pull tx_lpi_enabled */
2349 val = tr32(TG3_CPMU_EEE_MODE);
2350 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2352 /* Pull lpi timer value */
2353 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2356 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2360 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2365 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2367 tp->link_config.active_duplex == DUPLEX_FULL &&
2368 (tp->link_config.active_speed == SPEED_100 ||
2369 tp->link_config.active_speed == SPEED_1000)) {
2372 if (tp->link_config.active_speed == SPEED_1000)
2373 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2375 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2377 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2379 tg3_eee_pull_config(tp, NULL);
2380 if (tp->eee.eee_active)
2384 if (!tp->setlpicnt) {
2385 if (current_link_up &&
2386 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2387 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2388 tg3_phy_toggle_auxctl_smdsp(tp, false);
2391 val = tr32(TG3_CPMU_EEE_MODE);
2392 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2396 static void tg3_phy_eee_enable(struct tg3 *tp)
2400 if (tp->link_config.active_speed == SPEED_1000 &&
2401 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2402 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2403 tg3_flag(tp, 57765_CLASS)) &&
2404 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2405 val = MII_TG3_DSP_TAP26_ALNOKO |
2406 MII_TG3_DSP_TAP26_RMRXSTO;
2407 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2408 tg3_phy_toggle_auxctl_smdsp(tp, false);
2411 val = tr32(TG3_CPMU_EEE_MODE);
2412 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2415 static int tg3_wait_macro_done(struct tg3 *tp)
2422 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2423 if ((tmp32 & 0x1000) == 0)
2433 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2435 static const u32 test_pat[4][6] = {
2436 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2437 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2438 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2439 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2443 for (chan = 0; chan < 4; chan++) {
2446 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2447 (chan * 0x2000) | 0x0200);
2448 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2450 for (i = 0; i < 6; i++)
2451 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2454 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2455 if (tg3_wait_macro_done(tp)) {
2460 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2461 (chan * 0x2000) | 0x0200);
2462 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2463 if (tg3_wait_macro_done(tp)) {
2468 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2469 if (tg3_wait_macro_done(tp)) {
2474 for (i = 0; i < 6; i += 2) {
2477 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2478 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2479 tg3_wait_macro_done(tp)) {
2485 if (low != test_pat[chan][i] ||
2486 high != test_pat[chan][i+1]) {
2487 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2488 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2489 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2499 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2503 for (chan = 0; chan < 4; chan++) {
2506 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2507 (chan * 0x2000) | 0x0200);
2508 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2509 for (i = 0; i < 6; i++)
2510 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2511 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2512 if (tg3_wait_macro_done(tp))
2519 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2521 u32 reg32, phy9_orig;
2522 int retries, do_phy_reset, err;
2528 err = tg3_bmcr_reset(tp);
2534 /* Disable transmitter and interrupt. */
2535 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2539 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2541 /* Set full-duplex, 1000 mbps. */
2542 tg3_writephy(tp, MII_BMCR,
2543 BMCR_FULLDPLX | BMCR_SPEED1000);
2545 /* Set to master mode. */
2546 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2549 tg3_writephy(tp, MII_CTRL1000,
2550 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2552 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2556 /* Block the PHY control access. */
2557 tg3_phydsp_write(tp, 0x8005, 0x0800);
2559 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2562 } while (--retries);
2564 err = tg3_phy_reset_chanpat(tp);
2568 tg3_phydsp_write(tp, 0x8005, 0x0000);
2570 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2571 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2573 tg3_phy_toggle_auxctl_smdsp(tp, false);
2575 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2577 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2579 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2586 static void tg3_carrier_off(struct tg3 *tp)
2588 netif_carrier_off(tp->dev);
2589 tp->link_up = false;
2592 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2594 if (tg3_flag(tp, ENABLE_ASF))
2595 netdev_warn(tp->dev,
2596 "Management side-band traffic will be interrupted during phy settings change\n");
2599 /* This will reset the tigon3 PHY if there is no valid
2600 * link unless the FORCE argument is non-zero.
2602 static int tg3_phy_reset(struct tg3 *tp)
2607 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2608 val = tr32(GRC_MISC_CFG);
2609 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2612 err = tg3_readphy(tp, MII_BMSR, &val);
2613 err |= tg3_readphy(tp, MII_BMSR, &val);
2617 if (netif_running(tp->dev) && tp->link_up) {
2618 netif_carrier_off(tp->dev);
2619 tg3_link_report(tp);
2622 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2623 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2624 tg3_asic_rev(tp) == ASIC_REV_5705) {
2625 err = tg3_phy_reset_5703_4_5(tp);
2632 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2633 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2634 cpmuctrl = tr32(TG3_CPMU_CTRL);
2635 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2637 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2640 err = tg3_bmcr_reset(tp);
2644 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2645 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2646 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2648 tw32(TG3_CPMU_CTRL, cpmuctrl);
2651 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2652 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2653 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2654 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2655 CPMU_LSPD_1000MB_MACCLK_12_5) {
2656 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2658 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2662 if (tg3_flag(tp, 5717_PLUS) &&
2663 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2666 tg3_phy_apply_otp(tp);
2668 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2669 tg3_phy_toggle_apd(tp, true);
2671 tg3_phy_toggle_apd(tp, false);
2674 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2675 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2676 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2677 tg3_phydsp_write(tp, 0x000a, 0x0323);
2678 tg3_phy_toggle_auxctl_smdsp(tp, false);
2681 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2682 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2683 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2686 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2687 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2688 tg3_phydsp_write(tp, 0x000a, 0x310b);
2689 tg3_phydsp_write(tp, 0x201f, 0x9506);
2690 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2691 tg3_phy_toggle_auxctl_smdsp(tp, false);
2693 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2694 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2695 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2696 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2697 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2698 tg3_writephy(tp, MII_TG3_TEST1,
2699 MII_TG3_TEST1_TRIM_EN | 0x4);
2701 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2703 tg3_phy_toggle_auxctl_smdsp(tp, false);
2707 /* Set Extended packet length bit (bit 14) on all chips that */
2708 /* support jumbo frames */
2709 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2710 /* Cannot do read-modify-write on 5401 */
2711 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2712 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2713 /* Set bit 14 with read-modify-write to preserve other bits */
2714 err = tg3_phy_auxctl_read(tp,
2715 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2717 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2718 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2721 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2722 * jumbo frames transmission.
2724 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2725 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2726 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2727 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2730 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2731 /* adjust output voltage */
2732 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2735 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2736 tg3_phydsp_write(tp, 0xffb, 0x4000);
2738 tg3_phy_toggle_automdix(tp, true);
2739 tg3_phy_set_wirespeed(tp);
2743 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2744 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2745 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2746 TG3_GPIO_MSG_NEED_VAUX)
2747 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2748 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2749 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2750 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2751 (TG3_GPIO_MSG_DRVR_PRES << 12))
2753 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2754 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2755 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2756 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2757 (TG3_GPIO_MSG_NEED_VAUX << 12))
2759 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2763 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2764 tg3_asic_rev(tp) == ASIC_REV_5719)
2765 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2767 status = tr32(TG3_CPMU_DRV_STATUS);
2769 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2770 status &= ~(TG3_GPIO_MSG_MASK << shift);
2771 status |= (newstat << shift);
2773 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2774 tg3_asic_rev(tp) == ASIC_REV_5719)
2775 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2777 tw32(TG3_CPMU_DRV_STATUS, status);
2779 return status >> TG3_APE_GPIO_MSG_SHIFT;
2782 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2784 if (!tg3_flag(tp, IS_NIC))
2787 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2788 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2789 tg3_asic_rev(tp) == ASIC_REV_5720) {
2790 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2793 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2795 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2796 TG3_GRC_LCLCTL_PWRSW_DELAY);
2798 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2800 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2801 TG3_GRC_LCLCTL_PWRSW_DELAY);
2807 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2811 if (!tg3_flag(tp, IS_NIC) ||
2812 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2813 tg3_asic_rev(tp) == ASIC_REV_5701)
2816 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2818 tw32_wait_f(GRC_LOCAL_CTRL,
2819 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2820 TG3_GRC_LCLCTL_PWRSW_DELAY);
2822 tw32_wait_f(GRC_LOCAL_CTRL,
2824 TG3_GRC_LCLCTL_PWRSW_DELAY);
2826 tw32_wait_f(GRC_LOCAL_CTRL,
2827 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2828 TG3_GRC_LCLCTL_PWRSW_DELAY);
2831 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2833 if (!tg3_flag(tp, IS_NIC))
2836 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2837 tg3_asic_rev(tp) == ASIC_REV_5701) {
2838 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2839 (GRC_LCLCTRL_GPIO_OE0 |
2840 GRC_LCLCTRL_GPIO_OE1 |
2841 GRC_LCLCTRL_GPIO_OE2 |
2842 GRC_LCLCTRL_GPIO_OUTPUT0 |
2843 GRC_LCLCTRL_GPIO_OUTPUT1),
2844 TG3_GRC_LCLCTL_PWRSW_DELAY);
2845 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2846 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2847 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2848 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2849 GRC_LCLCTRL_GPIO_OE1 |
2850 GRC_LCLCTRL_GPIO_OE2 |
2851 GRC_LCLCTRL_GPIO_OUTPUT0 |
2852 GRC_LCLCTRL_GPIO_OUTPUT1 |
2854 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2855 TG3_GRC_LCLCTL_PWRSW_DELAY);
2857 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2858 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2859 TG3_GRC_LCLCTL_PWRSW_DELAY);
2861 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2862 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2863 TG3_GRC_LCLCTL_PWRSW_DELAY);
2866 u32 grc_local_ctrl = 0;
2868 /* Workaround to prevent overdrawing Amps. */
2869 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2870 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2871 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2873 TG3_GRC_LCLCTL_PWRSW_DELAY);
2876 /* On 5753 and variants, GPIO2 cannot be used. */
2877 no_gpio2 = tp->nic_sram_data_cfg &
2878 NIC_SRAM_DATA_CFG_NO_GPIO2;
2880 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2881 GRC_LCLCTRL_GPIO_OE1 |
2882 GRC_LCLCTRL_GPIO_OE2 |
2883 GRC_LCLCTRL_GPIO_OUTPUT1 |
2884 GRC_LCLCTRL_GPIO_OUTPUT2;
2886 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2887 GRC_LCLCTRL_GPIO_OUTPUT2);
2889 tw32_wait_f(GRC_LOCAL_CTRL,
2890 tp->grc_local_ctrl | grc_local_ctrl,
2891 TG3_GRC_LCLCTL_PWRSW_DELAY);
2893 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2895 tw32_wait_f(GRC_LOCAL_CTRL,
2896 tp->grc_local_ctrl | grc_local_ctrl,
2897 TG3_GRC_LCLCTL_PWRSW_DELAY);
2900 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2901 tw32_wait_f(GRC_LOCAL_CTRL,
2902 tp->grc_local_ctrl | grc_local_ctrl,
2903 TG3_GRC_LCLCTL_PWRSW_DELAY);
2908 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2912 /* Serialize power state transitions */
2913 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2916 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2917 msg = TG3_GPIO_MSG_NEED_VAUX;
2919 msg = tg3_set_function_status(tp, msg);
2921 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2924 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2925 tg3_pwrsrc_switch_to_vaux(tp);
2927 tg3_pwrsrc_die_with_vmain(tp);
2930 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2933 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2935 bool need_vaux = false;
2937 /* The GPIOs do something completely different on 57765. */
2938 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2941 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2942 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2943 tg3_asic_rev(tp) == ASIC_REV_5720) {
2944 tg3_frob_aux_power_5717(tp, include_wol ?
2945 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2949 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2950 struct net_device *dev_peer;
2952 dev_peer = pci_get_drvdata(tp->pdev_peer);
2954 /* remove_one() may have been run on the peer. */
2956 struct tg3 *tp_peer = netdev_priv(dev_peer);
2958 if (tg3_flag(tp_peer, INIT_COMPLETE))
2961 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2962 tg3_flag(tp_peer, ENABLE_ASF))
2967 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2968 tg3_flag(tp, ENABLE_ASF))
2972 tg3_pwrsrc_switch_to_vaux(tp);
2974 tg3_pwrsrc_die_with_vmain(tp);
2977 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2979 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2981 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2982 if (speed != SPEED_10)
2984 } else if (speed == SPEED_10)
2990 static bool tg3_phy_power_bug(struct tg3 *tp)
2992 switch (tg3_asic_rev(tp)) {
2997 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3006 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3015 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3019 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3022 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3023 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3024 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3025 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3028 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3029 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3030 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3035 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3037 val = tr32(GRC_MISC_CFG);
3038 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3041 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3043 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3046 tg3_writephy(tp, MII_ADVERTISE, 0);
3047 tg3_writephy(tp, MII_BMCR,
3048 BMCR_ANENABLE | BMCR_ANRESTART);
3050 tg3_writephy(tp, MII_TG3_FET_TEST,
3051 phytest | MII_TG3_FET_SHADOW_EN);
3052 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3053 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3055 MII_TG3_FET_SHDW_AUXMODE4,
3058 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3061 } else if (do_low_power) {
3062 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3063 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3065 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3066 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3067 MII_TG3_AUXCTL_PCTL_VREG_11V;
3068 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3071 /* The PHY should not be powered down on some chips because
3074 if (tg3_phy_power_bug(tp))
3077 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3078 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3079 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3080 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3081 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3082 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3085 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3088 /* tp->lock is held. */
3089 static int tg3_nvram_lock(struct tg3 *tp)
3091 if (tg3_flag(tp, NVRAM)) {
3094 if (tp->nvram_lock_cnt == 0) {
3095 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3096 for (i = 0; i < 8000; i++) {
3097 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3102 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3106 tp->nvram_lock_cnt++;
3111 /* tp->lock is held. */
3112 static void tg3_nvram_unlock(struct tg3 *tp)
3114 if (tg3_flag(tp, NVRAM)) {
3115 if (tp->nvram_lock_cnt > 0)
3116 tp->nvram_lock_cnt--;
3117 if (tp->nvram_lock_cnt == 0)
3118 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3122 /* tp->lock is held. */
3123 static void tg3_enable_nvram_access(struct tg3 *tp)
3125 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3126 u32 nvaccess = tr32(NVRAM_ACCESS);
3128 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3132 /* tp->lock is held. */
3133 static void tg3_disable_nvram_access(struct tg3 *tp)
3135 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3136 u32 nvaccess = tr32(NVRAM_ACCESS);
3138 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3142 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3143 u32 offset, u32 *val)
3148 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3151 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3152 EEPROM_ADDR_DEVID_MASK |
3154 tw32(GRC_EEPROM_ADDR,
3156 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3157 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3158 EEPROM_ADDR_ADDR_MASK) |
3159 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3161 for (i = 0; i < 1000; i++) {
3162 tmp = tr32(GRC_EEPROM_ADDR);
3164 if (tmp & EEPROM_ADDR_COMPLETE)
3168 if (!(tmp & EEPROM_ADDR_COMPLETE))
3171 tmp = tr32(GRC_EEPROM_DATA);
3174 * The data will always be opposite the native endian
3175 * format. Perform a blind byteswap to compensate.
3182 #define NVRAM_CMD_TIMEOUT 10000
3184 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3188 tw32(NVRAM_CMD, nvram_cmd);
3189 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3191 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3197 if (i == NVRAM_CMD_TIMEOUT)
3203 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3205 if (tg3_flag(tp, NVRAM) &&
3206 tg3_flag(tp, NVRAM_BUFFERED) &&
3207 tg3_flag(tp, FLASH) &&
3208 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3209 (tp->nvram_jedecnum == JEDEC_ATMEL))
3211 addr = ((addr / tp->nvram_pagesize) <<
3212 ATMEL_AT45DB0X1B_PAGE_POS) +
3213 (addr % tp->nvram_pagesize);
3218 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3220 if (tg3_flag(tp, NVRAM) &&
3221 tg3_flag(tp, NVRAM_BUFFERED) &&
3222 tg3_flag(tp, FLASH) &&
3223 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3224 (tp->nvram_jedecnum == JEDEC_ATMEL))
3226 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3227 tp->nvram_pagesize) +
3228 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3233 /* NOTE: Data read in from NVRAM is byteswapped according to
3234 * the byteswapping settings for all other register accesses.
3235 * tg3 devices are BE devices, so on a BE machine, the data
3236 * returned will be exactly as it is seen in NVRAM. On a LE
3237 * machine, the 32-bit value will be byteswapped.
3239 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3243 if (!tg3_flag(tp, NVRAM))
3244 return tg3_nvram_read_using_eeprom(tp, offset, val);
3246 offset = tg3_nvram_phys_addr(tp, offset);
3248 if (offset > NVRAM_ADDR_MSK)
3251 ret = tg3_nvram_lock(tp);
3255 tg3_enable_nvram_access(tp);
3257 tw32(NVRAM_ADDR, offset);
3258 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3259 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3262 *val = tr32(NVRAM_RDDATA);
3264 tg3_disable_nvram_access(tp);
3266 tg3_nvram_unlock(tp);
3271 /* Ensures NVRAM data is in bytestream format. */
3272 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3275 int res = tg3_nvram_read(tp, offset, &v);
3277 *val = cpu_to_be32(v);
3281 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3282 u32 offset, u32 len, u8 *buf)
3287 for (i = 0; i < len; i += 4) {
3293 memcpy(&data, buf + i, 4);
3296 * The SEEPROM interface expects the data to always be opposite
3297 * the native endian format. We accomplish this by reversing
3298 * all the operations that would have been performed on the
3299 * data from a call to tg3_nvram_read_be32().
3301 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3303 val = tr32(GRC_EEPROM_ADDR);
3304 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3306 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3308 tw32(GRC_EEPROM_ADDR, val |
3309 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3310 (addr & EEPROM_ADDR_ADDR_MASK) |
3314 for (j = 0; j < 1000; j++) {
3315 val = tr32(GRC_EEPROM_ADDR);
3317 if (val & EEPROM_ADDR_COMPLETE)
3321 if (!(val & EEPROM_ADDR_COMPLETE)) {
3330 /* offset and length are dword aligned */
3331 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3335 u32 pagesize = tp->nvram_pagesize;
3336 u32 pagemask = pagesize - 1;
3340 tmp = kmalloc(pagesize, GFP_KERNEL);
3346 u32 phy_addr, page_off, size;
3348 phy_addr = offset & ~pagemask;
3350 for (j = 0; j < pagesize; j += 4) {
3351 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3352 (__be32 *) (tmp + j));
3359 page_off = offset & pagemask;
3366 memcpy(tmp + page_off, buf, size);
3368 offset = offset + (pagesize - page_off);
3370 tg3_enable_nvram_access(tp);
3373 * Before we can erase the flash page, we need
3374 * to issue a special "write enable" command.
3376 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3378 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3381 /* Erase the target page */
3382 tw32(NVRAM_ADDR, phy_addr);
3384 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3385 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3387 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3390 /* Issue another write enable to start the write. */
3391 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3393 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3396 for (j = 0; j < pagesize; j += 4) {
3399 data = *((__be32 *) (tmp + j));
3401 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3403 tw32(NVRAM_ADDR, phy_addr + j);
3405 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3409 nvram_cmd |= NVRAM_CMD_FIRST;
3410 else if (j == (pagesize - 4))
3411 nvram_cmd |= NVRAM_CMD_LAST;
3413 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3421 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3422 tg3_nvram_exec_cmd(tp, nvram_cmd);
3429 /* offset and length are dword aligned */
3430 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3435 for (i = 0; i < len; i += 4, offset += 4) {
3436 u32 page_off, phy_addr, nvram_cmd;
3439 memcpy(&data, buf + i, 4);
3440 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3442 page_off = offset % tp->nvram_pagesize;
3444 phy_addr = tg3_nvram_phys_addr(tp, offset);
3446 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3448 if (page_off == 0 || i == 0)
3449 nvram_cmd |= NVRAM_CMD_FIRST;
3450 if (page_off == (tp->nvram_pagesize - 4))
3451 nvram_cmd |= NVRAM_CMD_LAST;
3454 nvram_cmd |= NVRAM_CMD_LAST;
3456 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3457 !tg3_flag(tp, FLASH) ||
3458 !tg3_flag(tp, 57765_PLUS))
3459 tw32(NVRAM_ADDR, phy_addr);
3461 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3462 !tg3_flag(tp, 5755_PLUS) &&
3463 (tp->nvram_jedecnum == JEDEC_ST) &&
3464 (nvram_cmd & NVRAM_CMD_FIRST)) {
3467 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3468 ret = tg3_nvram_exec_cmd(tp, cmd);
3472 if (!tg3_flag(tp, FLASH)) {
3473 /* We always do complete word writes to eeprom. */
3474 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3477 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3484 /* offset and length are dword aligned */
3485 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3489 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3490 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3491 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3495 if (!tg3_flag(tp, NVRAM)) {
3496 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3500 ret = tg3_nvram_lock(tp);
3504 tg3_enable_nvram_access(tp);
3505 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3506 tw32(NVRAM_WRITE1, 0x406);
3508 grc_mode = tr32(GRC_MODE);
3509 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3511 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3512 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3515 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3519 grc_mode = tr32(GRC_MODE);
3520 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3522 tg3_disable_nvram_access(tp);
3523 tg3_nvram_unlock(tp);
3526 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3527 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3534 #define RX_CPU_SCRATCH_BASE 0x30000
3535 #define RX_CPU_SCRATCH_SIZE 0x04000
3536 #define TX_CPU_SCRATCH_BASE 0x34000
3537 #define TX_CPU_SCRATCH_SIZE 0x04000
3539 /* tp->lock is held. */
3540 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3543 const int iters = 10000;
3545 for (i = 0; i < iters; i++) {
3546 tw32(cpu_base + CPU_STATE, 0xffffffff);
3547 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3548 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3552 return (i == iters) ? -EBUSY : 0;
3555 /* tp->lock is held. */
3556 static int tg3_rxcpu_pause(struct tg3 *tp)
3558 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3560 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3561 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3567 /* tp->lock is held. */
3568 static int tg3_txcpu_pause(struct tg3 *tp)
3570 return tg3_pause_cpu(tp, TX_CPU_BASE);
3573 /* tp->lock is held. */
3574 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3576 tw32(cpu_base + CPU_STATE, 0xffffffff);
3577 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3580 /* tp->lock is held. */
3581 static void tg3_rxcpu_resume(struct tg3 *tp)
3583 tg3_resume_cpu(tp, RX_CPU_BASE);
3586 /* tp->lock is held. */
3587 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3591 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3593 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3594 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3596 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3599 if (cpu_base == RX_CPU_BASE) {
3600 rc = tg3_rxcpu_pause(tp);
3603 * There is only an Rx CPU for the 5750 derivative in the
3606 if (tg3_flag(tp, IS_SSB_CORE))
3609 rc = tg3_txcpu_pause(tp);
3613 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3614 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3618 /* Clear firmware's nvram arbitration. */
3619 if (tg3_flag(tp, NVRAM))
3620 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3624 static int tg3_fw_data_len(struct tg3 *tp,
3625 const struct tg3_firmware_hdr *fw_hdr)
3629 /* Non fragmented firmware have one firmware header followed by a
3630 * contiguous chunk of data to be written. The length field in that
3631 * header is not the length of data to be written but the complete
3632 * length of the bss. The data length is determined based on
3633 * tp->fw->size minus headers.
3635 * Fragmented firmware have a main header followed by multiple
3636 * fragments. Each fragment is identical to non fragmented firmware
3637 * with a firmware header followed by a contiguous chunk of data. In
3638 * the main header, the length field is unused and set to 0xffffffff.
3639 * In each fragment header the length is the entire size of that
3640 * fragment i.e. fragment data + header length. Data length is
3641 * therefore length field in the header minus TG3_FW_HDR_LEN.
3643 if (tp->fw_len == 0xffffffff)
3644 fw_len = be32_to_cpu(fw_hdr->len);
3646 fw_len = tp->fw->size;
3648 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3651 /* tp->lock is held. */
3652 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3653 u32 cpu_scratch_base, int cpu_scratch_size,
3654 const struct tg3_firmware_hdr *fw_hdr)
3657 void (*write_op)(struct tg3 *, u32, u32);
3658 int total_len = tp->fw->size;
3660 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3662 "%s: Trying to load TX cpu firmware which is 5705\n",
3667 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3668 write_op = tg3_write_mem;
3670 write_op = tg3_write_indirect_reg32;
3672 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3673 /* It is possible that bootcode is still loading at this point.
3674 * Get the nvram lock first before halting the cpu.
3676 int lock_err = tg3_nvram_lock(tp);
3677 err = tg3_halt_cpu(tp, cpu_base);
3679 tg3_nvram_unlock(tp);
3683 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3684 write_op(tp, cpu_scratch_base + i, 0);
3685 tw32(cpu_base + CPU_STATE, 0xffffffff);
3686 tw32(cpu_base + CPU_MODE,
3687 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3689 /* Subtract additional main header for fragmented firmware and
3690 * advance to the first fragment
3692 total_len -= TG3_FW_HDR_LEN;
3697 u32 *fw_data = (u32 *)(fw_hdr + 1);
3698 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3699 write_op(tp, cpu_scratch_base +
3700 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3702 be32_to_cpu(fw_data[i]));
3704 total_len -= be32_to_cpu(fw_hdr->len);
3706 /* Advance to next fragment */
3707 fw_hdr = (struct tg3_firmware_hdr *)
3708 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3709 } while (total_len > 0);
3717 /* tp->lock is held. */
3718 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3721 const int iters = 5;
3723 tw32(cpu_base + CPU_STATE, 0xffffffff);
3724 tw32_f(cpu_base + CPU_PC, pc);
3726 for (i = 0; i < iters; i++) {
3727 if (tr32(cpu_base + CPU_PC) == pc)
3729 tw32(cpu_base + CPU_STATE, 0xffffffff);
3730 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3731 tw32_f(cpu_base + CPU_PC, pc);
3735 return (i == iters) ? -EBUSY : 0;
3738 /* tp->lock is held. */
3739 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3741 const struct tg3_firmware_hdr *fw_hdr;
3744 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3746 /* Firmware blob starts with version numbers, followed by
3747 start address and length. We are setting complete length.
3748 length = end_address_of_bss - start_address_of_text.
3749 Remainder is the blob to be loaded contiguously
3750 from start address. */
3752 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3753 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3758 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3759 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3764 /* Now startup only the RX cpu. */
3765 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3766 be32_to_cpu(fw_hdr->base_addr));
3768 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3769 "should be %08x\n", __func__,
3770 tr32(RX_CPU_BASE + CPU_PC),
3771 be32_to_cpu(fw_hdr->base_addr));
3775 tg3_rxcpu_resume(tp);
3780 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3782 const int iters = 1000;
3786 /* Wait for boot code to complete initialization and enter service
3787 * loop. It is then safe to download service patches
3789 for (i = 0; i < iters; i++) {
3790 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3797 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3801 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3803 netdev_warn(tp->dev,
3804 "Other patches exist. Not downloading EEE patch\n");
3811 /* tp->lock is held. */
3812 static void tg3_load_57766_firmware(struct tg3 *tp)
3814 struct tg3_firmware_hdr *fw_hdr;
3816 if (!tg3_flag(tp, NO_NVRAM))
3819 if (tg3_validate_rxcpu_state(tp))
3825 /* This firmware blob has a different format than older firmware
3826 * releases as given below. The main difference is we have fragmented
3827 * data to be written to non-contiguous locations.
3829 * In the beginning we have a firmware header identical to other
3830 * firmware which consists of version, base addr and length. The length
3831 * here is unused and set to 0xffffffff.
3833 * This is followed by a series of firmware fragments which are
3834 * individually identical to previous firmware. i.e. they have the
3835 * firmware header and followed by data for that fragment. The version
3836 * field of the individual fragment header is unused.
3839 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3840 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3843 if (tg3_rxcpu_pause(tp))
3846 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3847 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3849 tg3_rxcpu_resume(tp);
3852 /* tp->lock is held. */
3853 static int tg3_load_tso_firmware(struct tg3 *tp)
3855 const struct tg3_firmware_hdr *fw_hdr;
3856 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3859 if (!tg3_flag(tp, FW_TSO))
3862 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3864 /* Firmware blob starts with version numbers, followed by
3865 start address and length. We are setting complete length.
3866 length = end_address_of_bss - start_address_of_text.
3867 Remainder is the blob to be loaded contiguously
3868 from start address. */
3870 cpu_scratch_size = tp->fw_len;
3872 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3873 cpu_base = RX_CPU_BASE;
3874 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3876 cpu_base = TX_CPU_BASE;
3877 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3878 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3881 err = tg3_load_firmware_cpu(tp, cpu_base,
3882 cpu_scratch_base, cpu_scratch_size,
3887 /* Now startup the cpu. */
3888 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3889 be32_to_cpu(fw_hdr->base_addr));
3892 "%s fails to set CPU PC, is %08x should be %08x\n",
3893 __func__, tr32(cpu_base + CPU_PC),
3894 be32_to_cpu(fw_hdr->base_addr));
3898 tg3_resume_cpu(tp, cpu_base);
3903 /* tp->lock is held. */
3904 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3906 u32 addr_high, addr_low;
3909 addr_high = ((tp->dev->dev_addr[0] << 8) |
3910 tp->dev->dev_addr[1]);
3911 addr_low = ((tp->dev->dev_addr[2] << 24) |
3912 (tp->dev->dev_addr[3] << 16) |
3913 (tp->dev->dev_addr[4] << 8) |
3914 (tp->dev->dev_addr[5] << 0));
3915 for (i = 0; i < 4; i++) {
3916 if (i == 1 && skip_mac_1)
3918 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3919 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3922 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3923 tg3_asic_rev(tp) == ASIC_REV_5704) {
3924 for (i = 0; i < 12; i++) {
3925 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3926 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3930 addr_high = (tp->dev->dev_addr[0] +
3931 tp->dev->dev_addr[1] +
3932 tp->dev->dev_addr[2] +
3933 tp->dev->dev_addr[3] +
3934 tp->dev->dev_addr[4] +
3935 tp->dev->dev_addr[5]) &
3936 TX_BACKOFF_SEED_MASK;
3937 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3940 static void tg3_enable_register_access(struct tg3 *tp)
3943 * Make sure register accesses (indirect or otherwise) will function
3946 pci_write_config_dword(tp->pdev,
3947 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3950 static int tg3_power_up(struct tg3 *tp)
3954 tg3_enable_register_access(tp);
3956 err = pci_set_power_state(tp->pdev, PCI_D0);
3958 /* Switch out of Vaux if it is a NIC */
3959 tg3_pwrsrc_switch_to_vmain(tp);
3961 netdev_err(tp->dev, "Transition to D0 failed\n");
3967 static int tg3_setup_phy(struct tg3 *, bool);
3969 static int tg3_power_down_prepare(struct tg3 *tp)
3972 bool device_should_wake, do_low_power;
3974 tg3_enable_register_access(tp);
3976 /* Restore the CLKREQ setting. */
3977 if (tg3_flag(tp, CLKREQ_BUG))
3978 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3979 PCI_EXP_LNKCTL_CLKREQ_EN);
3981 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3982 tw32(TG3PCI_MISC_HOST_CTRL,
3983 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3985 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3986 tg3_flag(tp, WOL_ENABLE);
3988 if (tg3_flag(tp, USE_PHYLIB)) {
3989 do_low_power = false;
3990 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3991 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3992 struct phy_device *phydev;
3993 u32 phyid, advertising;
3995 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3997 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3999 tp->link_config.speed = phydev->speed;
4000 tp->link_config.duplex = phydev->duplex;
4001 tp->link_config.autoneg = phydev->autoneg;
4002 tp->link_config.advertising = phydev->advertising;
4004 advertising = ADVERTISED_TP |
4006 ADVERTISED_Autoneg |
4007 ADVERTISED_10baseT_Half;
4009 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4010 if (tg3_flag(tp, WOL_SPEED_100MB))
4012 ADVERTISED_100baseT_Half |
4013 ADVERTISED_100baseT_Full |
4014 ADVERTISED_10baseT_Full;
4016 advertising |= ADVERTISED_10baseT_Full;
4019 phydev->advertising = advertising;
4021 phy_start_aneg(phydev);
4023 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4024 if (phyid != PHY_ID_BCMAC131) {
4025 phyid &= PHY_BCM_OUI_MASK;
4026 if (phyid == PHY_BCM_OUI_1 ||
4027 phyid == PHY_BCM_OUI_2 ||
4028 phyid == PHY_BCM_OUI_3)
4029 do_low_power = true;
4033 do_low_power = true;
4035 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4036 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4038 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4039 tg3_setup_phy(tp, false);
4042 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4045 val = tr32(GRC_VCPU_EXT_CTRL);
4046 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4047 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4051 for (i = 0; i < 200; i++) {
4052 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4053 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4058 if (tg3_flag(tp, WOL_CAP))
4059 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4060 WOL_DRV_STATE_SHUTDOWN |
4064 if (device_should_wake) {
4067 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4069 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4070 tg3_phy_auxctl_write(tp,
4071 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4072 MII_TG3_AUXCTL_PCTL_WOL_EN |
4073 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4074 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4078 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4079 mac_mode = MAC_MODE_PORT_MODE_GMII;
4080 else if (tp->phy_flags &
4081 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4082 if (tp->link_config.active_speed == SPEED_1000)
4083 mac_mode = MAC_MODE_PORT_MODE_GMII;
4085 mac_mode = MAC_MODE_PORT_MODE_MII;
4087 mac_mode = MAC_MODE_PORT_MODE_MII;
4089 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4090 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4091 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4092 SPEED_100 : SPEED_10;
4093 if (tg3_5700_link_polarity(tp, speed))
4094 mac_mode |= MAC_MODE_LINK_POLARITY;
4096 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4099 mac_mode = MAC_MODE_PORT_MODE_TBI;
4102 if (!tg3_flag(tp, 5750_PLUS))
4103 tw32(MAC_LED_CTRL, tp->led_ctrl);
4105 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4106 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4107 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4108 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4110 if (tg3_flag(tp, ENABLE_APE))
4111 mac_mode |= MAC_MODE_APE_TX_EN |
4112 MAC_MODE_APE_RX_EN |
4113 MAC_MODE_TDE_ENABLE;
4115 tw32_f(MAC_MODE, mac_mode);
4118 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4122 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4123 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4124 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4127 base_val = tp->pci_clock_ctrl;
4128 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4129 CLOCK_CTRL_TXCLK_DISABLE);
4131 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4132 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4133 } else if (tg3_flag(tp, 5780_CLASS) ||
4134 tg3_flag(tp, CPMU_PRESENT) ||
4135 tg3_asic_rev(tp) == ASIC_REV_5906) {
4137 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4138 u32 newbits1, newbits2;
4140 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4141 tg3_asic_rev(tp) == ASIC_REV_5701) {
4142 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4143 CLOCK_CTRL_TXCLK_DISABLE |
4145 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4146 } else if (tg3_flag(tp, 5705_PLUS)) {
4147 newbits1 = CLOCK_CTRL_625_CORE;
4148 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4150 newbits1 = CLOCK_CTRL_ALTCLK;
4151 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4154 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4157 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4160 if (!tg3_flag(tp, 5705_PLUS)) {
4163 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4164 tg3_asic_rev(tp) == ASIC_REV_5701) {
4165 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4166 CLOCK_CTRL_TXCLK_DISABLE |
4167 CLOCK_CTRL_44MHZ_CORE);
4169 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4172 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4173 tp->pci_clock_ctrl | newbits3, 40);
4177 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4178 tg3_power_down_phy(tp, do_low_power);
4180 tg3_frob_aux_power(tp, true);
4182 /* Workaround for unstable PLL clock */
4183 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4184 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4185 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4186 u32 val = tr32(0x7d00);
4188 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4190 if (!tg3_flag(tp, ENABLE_ASF)) {
4193 err = tg3_nvram_lock(tp);
4194 tg3_halt_cpu(tp, RX_CPU_BASE);
4196 tg3_nvram_unlock(tp);
4200 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4202 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4207 static void tg3_power_down(struct tg3 *tp)
4209 tg3_power_down_prepare(tp);
4211 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4212 pci_set_power_state(tp->pdev, PCI_D3hot);
4215 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4217 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4218 case MII_TG3_AUX_STAT_10HALF:
4220 *duplex = DUPLEX_HALF;
4223 case MII_TG3_AUX_STAT_10FULL:
4225 *duplex = DUPLEX_FULL;
4228 case MII_TG3_AUX_STAT_100HALF:
4230 *duplex = DUPLEX_HALF;
4233 case MII_TG3_AUX_STAT_100FULL:
4235 *duplex = DUPLEX_FULL;
4238 case MII_TG3_AUX_STAT_1000HALF:
4239 *speed = SPEED_1000;
4240 *duplex = DUPLEX_HALF;
4243 case MII_TG3_AUX_STAT_1000FULL:
4244 *speed = SPEED_1000;
4245 *duplex = DUPLEX_FULL;
4249 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4250 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4252 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4256 *speed = SPEED_UNKNOWN;
4257 *duplex = DUPLEX_UNKNOWN;
4262 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4267 new_adv = ADVERTISE_CSMA;
4268 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4269 new_adv |= mii_advertise_flowctrl(flowctrl);
4271 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4275 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4276 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4278 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4279 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4280 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4282 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4287 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4290 tw32(TG3_CPMU_EEE_MODE,
4291 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4293 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4298 /* Advertise 100-BaseTX EEE ability */
4299 if (advertise & ADVERTISED_100baseT_Full)
4300 val |= MDIO_AN_EEE_ADV_100TX;
4301 /* Advertise 1000-BaseT EEE ability */
4302 if (advertise & ADVERTISED_1000baseT_Full)
4303 val |= MDIO_AN_EEE_ADV_1000T;
4305 if (!tp->eee.eee_enabled) {
4307 tp->eee.advertised = 0;
4309 tp->eee.advertised = advertise &
4310 (ADVERTISED_100baseT_Full |
4311 ADVERTISED_1000baseT_Full);
4314 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4318 switch (tg3_asic_rev(tp)) {
4320 case ASIC_REV_57765:
4321 case ASIC_REV_57766:
4323 /* If we advertised any eee advertisements above... */
4325 val = MII_TG3_DSP_TAP26_ALNOKO |
4326 MII_TG3_DSP_TAP26_RMRXSTO |
4327 MII_TG3_DSP_TAP26_OPCSINPT;
4328 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4332 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4333 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4334 MII_TG3_DSP_CH34TP2_HIBW01);
4337 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4346 static void tg3_phy_copper_begin(struct tg3 *tp)
4348 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4349 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4352 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4353 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4354 adv = ADVERTISED_10baseT_Half |
4355 ADVERTISED_10baseT_Full;
4356 if (tg3_flag(tp, WOL_SPEED_100MB))
4357 adv |= ADVERTISED_100baseT_Half |
4358 ADVERTISED_100baseT_Full;
4359 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4360 adv |= ADVERTISED_1000baseT_Half |
4361 ADVERTISED_1000baseT_Full;
4363 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4365 adv = tp->link_config.advertising;
4366 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4367 adv &= ~(ADVERTISED_1000baseT_Half |
4368 ADVERTISED_1000baseT_Full);
4370 fc = tp->link_config.flowctrl;
4373 tg3_phy_autoneg_cfg(tp, adv, fc);
4375 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4376 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4377 /* Normally during power down we want to autonegotiate
4378 * the lowest possible speed for WOL. However, to avoid
4379 * link flap, we leave it untouched.
4384 tg3_writephy(tp, MII_BMCR,
4385 BMCR_ANENABLE | BMCR_ANRESTART);
4388 u32 bmcr, orig_bmcr;
4390 tp->link_config.active_speed = tp->link_config.speed;
4391 tp->link_config.active_duplex = tp->link_config.duplex;
4393 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4394 /* With autoneg disabled, 5715 only links up when the
4395 * advertisement register has the configured speed
4398 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4402 switch (tp->link_config.speed) {
4408 bmcr |= BMCR_SPEED100;
4412 bmcr |= BMCR_SPEED1000;
4416 if (tp->link_config.duplex == DUPLEX_FULL)
4417 bmcr |= BMCR_FULLDPLX;
4419 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4420 (bmcr != orig_bmcr)) {
4421 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4422 for (i = 0; i < 1500; i++) {
4426 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4427 tg3_readphy(tp, MII_BMSR, &tmp))
4429 if (!(tmp & BMSR_LSTATUS)) {
4434 tg3_writephy(tp, MII_BMCR, bmcr);
4440 static int tg3_phy_pull_config(struct tg3 *tp)
4445 err = tg3_readphy(tp, MII_BMCR, &val);
4449 if (!(val & BMCR_ANENABLE)) {
4450 tp->link_config.autoneg = AUTONEG_DISABLE;
4451 tp->link_config.advertising = 0;
4452 tg3_flag_clear(tp, PAUSE_AUTONEG);
4456 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4458 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4461 tp->link_config.speed = SPEED_10;
4464 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4467 tp->link_config.speed = SPEED_100;
4469 case BMCR_SPEED1000:
4470 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4471 tp->link_config.speed = SPEED_1000;
4479 if (val & BMCR_FULLDPLX)
4480 tp->link_config.duplex = DUPLEX_FULL;
4482 tp->link_config.duplex = DUPLEX_HALF;
4484 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4490 tp->link_config.autoneg = AUTONEG_ENABLE;
4491 tp->link_config.advertising = ADVERTISED_Autoneg;
4492 tg3_flag_set(tp, PAUSE_AUTONEG);
4494 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4497 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4501 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4502 tp->link_config.advertising |= adv | ADVERTISED_TP;
4504 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4506 tp->link_config.advertising |= ADVERTISED_FIBRE;
4509 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4512 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4513 err = tg3_readphy(tp, MII_CTRL1000, &val);
4517 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4519 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4523 adv = tg3_decode_flowctrl_1000X(val);
4524 tp->link_config.flowctrl = adv;
4526 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4527 adv = mii_adv_to_ethtool_adv_x(val);
4530 tp->link_config.advertising |= adv;
4537 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4541 /* Turn off tap power management. */
4542 /* Set Extended packet length bit */
4543 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4545 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4546 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4547 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4548 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4549 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4556 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4558 struct ethtool_eee eee;
4560 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4563 tg3_eee_pull_config(tp, &eee);
4565 if (tp->eee.eee_enabled) {
4566 if (tp->eee.advertised != eee.advertised ||
4567 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4568 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4571 /* EEE is disabled but we're advertising */
4579 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4581 u32 advmsk, tgtadv, advertising;
4583 advertising = tp->link_config.advertising;
4584 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4586 advmsk = ADVERTISE_ALL;
4587 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4588 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4589 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4592 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4595 if ((*lcladv & advmsk) != tgtadv)
4598 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4601 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4603 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4607 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4608 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4609 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4610 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4611 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4613 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4616 if (tg3_ctrl != tgtadv)
4623 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4627 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4630 if (tg3_readphy(tp, MII_STAT1000, &val))
4633 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4636 if (tg3_readphy(tp, MII_LPA, rmtadv))
4639 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4640 tp->link_config.rmt_adv = lpeth;
4645 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4647 if (curr_link_up != tp->link_up) {
4649 netif_carrier_on(tp->dev);
4651 netif_carrier_off(tp->dev);
4652 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4653 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4656 tg3_link_report(tp);
4663 static void tg3_clear_mac_status(struct tg3 *tp)
4668 MAC_STATUS_SYNC_CHANGED |
4669 MAC_STATUS_CFG_CHANGED |
4670 MAC_STATUS_MI_COMPLETION |
4671 MAC_STATUS_LNKSTATE_CHANGED);
4675 static void tg3_setup_eee(struct tg3 *tp)
4679 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4680 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4681 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4682 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4684 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4686 tw32_f(TG3_CPMU_EEE_CTRL,
4687 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4689 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4690 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4691 TG3_CPMU_EEEMD_LPI_IN_RX |
4692 TG3_CPMU_EEEMD_EEE_ENABLE;
4694 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4695 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4697 if (tg3_flag(tp, ENABLE_APE))
4698 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4700 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4702 tw32_f(TG3_CPMU_EEE_DBTMR1,
4703 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4704 (tp->eee.tx_lpi_timer & 0xffff));
4706 tw32_f(TG3_CPMU_EEE_DBTMR2,
4707 TG3_CPMU_DBTMR2_APE_TX_2047US |
4708 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4711 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4713 bool current_link_up;
4715 u32 lcl_adv, rmt_adv;
4720 tg3_clear_mac_status(tp);
4722 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4724 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4728 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4730 /* Some third-party PHYs need to be reset on link going
4733 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4734 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4735 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4737 tg3_readphy(tp, MII_BMSR, &bmsr);
4738 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4739 !(bmsr & BMSR_LSTATUS))
4745 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4746 tg3_readphy(tp, MII_BMSR, &bmsr);
4747 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4748 !tg3_flag(tp, INIT_COMPLETE))
4751 if (!(bmsr & BMSR_LSTATUS)) {
4752 err = tg3_init_5401phy_dsp(tp);
4756 tg3_readphy(tp, MII_BMSR, &bmsr);
4757 for (i = 0; i < 1000; i++) {
4759 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4760 (bmsr & BMSR_LSTATUS)) {
4766 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4767 TG3_PHY_REV_BCM5401_B0 &&
4768 !(bmsr & BMSR_LSTATUS) &&
4769 tp->link_config.active_speed == SPEED_1000) {
4770 err = tg3_phy_reset(tp);
4772 err = tg3_init_5401phy_dsp(tp);
4777 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4778 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4779 /* 5701 {A0,B0} CRC bug workaround */
4780 tg3_writephy(tp, 0x15, 0x0a75);
4781 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4782 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4783 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4786 /* Clear pending interrupts... */
4787 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4788 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4790 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4791 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4792 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4793 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4795 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4796 tg3_asic_rev(tp) == ASIC_REV_5701) {
4797 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4798 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4799 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4801 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4804 current_link_up = false;
4805 current_speed = SPEED_UNKNOWN;
4806 current_duplex = DUPLEX_UNKNOWN;
4807 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4808 tp->link_config.rmt_adv = 0;
4810 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4811 err = tg3_phy_auxctl_read(tp,
4812 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4814 if (!err && !(val & (1 << 10))) {
4815 tg3_phy_auxctl_write(tp,
4816 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4823 for (i = 0; i < 100; i++) {
4824 tg3_readphy(tp, MII_BMSR, &bmsr);
4825 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4826 (bmsr & BMSR_LSTATUS))
4831 if (bmsr & BMSR_LSTATUS) {
4834 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4835 for (i = 0; i < 2000; i++) {
4837 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4842 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4847 for (i = 0; i < 200; i++) {
4848 tg3_readphy(tp, MII_BMCR, &bmcr);
4849 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4851 if (bmcr && bmcr != 0x7fff)
4859 tp->link_config.active_speed = current_speed;
4860 tp->link_config.active_duplex = current_duplex;
4862 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4863 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4865 if ((bmcr & BMCR_ANENABLE) &&
4867 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4868 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4869 current_link_up = true;
4871 /* EEE settings changes take effect only after a phy
4872 * reset. If we have skipped a reset due to Link Flap
4873 * Avoidance being enabled, do it now.
4875 if (!eee_config_ok &&
4876 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4882 if (!(bmcr & BMCR_ANENABLE) &&
4883 tp->link_config.speed == current_speed &&
4884 tp->link_config.duplex == current_duplex) {
4885 current_link_up = true;
4889 if (current_link_up &&
4890 tp->link_config.active_duplex == DUPLEX_FULL) {
4893 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4894 reg = MII_TG3_FET_GEN_STAT;
4895 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4897 reg = MII_TG3_EXT_STAT;
4898 bit = MII_TG3_EXT_STAT_MDIX;
4901 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4902 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4904 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4909 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4910 tg3_phy_copper_begin(tp);
4912 if (tg3_flag(tp, ROBOSWITCH)) {
4913 current_link_up = true;
4914 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4915 current_speed = SPEED_1000;
4916 current_duplex = DUPLEX_FULL;
4917 tp->link_config.active_speed = current_speed;
4918 tp->link_config.active_duplex = current_duplex;
4921 tg3_readphy(tp, MII_BMSR, &bmsr);
4922 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4923 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4924 current_link_up = true;
4927 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4928 if (current_link_up) {
4929 if (tp->link_config.active_speed == SPEED_100 ||
4930 tp->link_config.active_speed == SPEED_10)
4931 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4933 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4934 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4935 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4937 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4939 /* In order for the 5750 core in BCM4785 chip to work properly
4940 * in RGMII mode, the Led Control Register must be set up.
4942 if (tg3_flag(tp, RGMII_MODE)) {
4943 u32 led_ctrl = tr32(MAC_LED_CTRL);
4944 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4946 if (tp->link_config.active_speed == SPEED_10)
4947 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4948 else if (tp->link_config.active_speed == SPEED_100)
4949 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4950 LED_CTRL_100MBPS_ON);
4951 else if (tp->link_config.active_speed == SPEED_1000)
4952 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4953 LED_CTRL_1000MBPS_ON);
4955 tw32(MAC_LED_CTRL, led_ctrl);
4959 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4960 if (tp->link_config.active_duplex == DUPLEX_HALF)
4961 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4963 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4964 if (current_link_up &&
4965 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4966 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4968 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4971 /* ??? Without this setting Netgear GA302T PHY does not
4972 * ??? send/receive packets...
4974 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4975 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4976 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4977 tw32_f(MAC_MI_MODE, tp->mi_mode);
4981 tw32_f(MAC_MODE, tp->mac_mode);
4984 tg3_phy_eee_adjust(tp, current_link_up);
4986 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4987 /* Polled via timer. */
4988 tw32_f(MAC_EVENT, 0);
4990 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4994 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4996 tp->link_config.active_speed == SPEED_1000 &&
4997 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5000 (MAC_STATUS_SYNC_CHANGED |
5001 MAC_STATUS_CFG_CHANGED));
5004 NIC_SRAM_FIRMWARE_MBOX,
5005 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5008 /* Prevent send BD corruption. */
5009 if (tg3_flag(tp, CLKREQ_BUG)) {
5010 if (tp->link_config.active_speed == SPEED_100 ||
5011 tp->link_config.active_speed == SPEED_10)
5012 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5013 PCI_EXP_LNKCTL_CLKREQ_EN);
5015 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5016 PCI_EXP_LNKCTL_CLKREQ_EN);
5019 tg3_test_and_report_link_chg(tp, current_link_up);
5024 struct tg3_fiber_aneginfo {
5026 #define ANEG_STATE_UNKNOWN 0
5027 #define ANEG_STATE_AN_ENABLE 1
5028 #define ANEG_STATE_RESTART_INIT 2
5029 #define ANEG_STATE_RESTART 3
5030 #define ANEG_STATE_DISABLE_LINK_OK 4
5031 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5032 #define ANEG_STATE_ABILITY_DETECT 6
5033 #define ANEG_STATE_ACK_DETECT_INIT 7
5034 #define ANEG_STATE_ACK_DETECT 8
5035 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5036 #define ANEG_STATE_COMPLETE_ACK 10
5037 #define ANEG_STATE_IDLE_DETECT_INIT 11
5038 #define ANEG_STATE_IDLE_DETECT 12
5039 #define ANEG_STATE_LINK_OK 13
5040 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5041 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5044 #define MR_AN_ENABLE 0x00000001
5045 #define MR_RESTART_AN 0x00000002
5046 #define MR_AN_COMPLETE 0x00000004
5047 #define MR_PAGE_RX 0x00000008
5048 #define MR_NP_LOADED 0x00000010
5049 #define MR_TOGGLE_TX 0x00000020
5050 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5051 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5052 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5053 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5054 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5055 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5056 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5057 #define MR_TOGGLE_RX 0x00002000
5058 #define MR_NP_RX 0x00004000
5060 #define MR_LINK_OK 0x80000000
5062 unsigned long link_time, cur_time;
5064 u32 ability_match_cfg;
5065 int ability_match_count;
5067 char ability_match, idle_match, ack_match;
5069 u32 txconfig, rxconfig;
5070 #define ANEG_CFG_NP 0x00000080
5071 #define ANEG_CFG_ACK 0x00000040
5072 #define ANEG_CFG_RF2 0x00000020
5073 #define ANEG_CFG_RF1 0x00000010
5074 #define ANEG_CFG_PS2 0x00000001
5075 #define ANEG_CFG_PS1 0x00008000
5076 #define ANEG_CFG_HD 0x00004000
5077 #define ANEG_CFG_FD 0x00002000
5078 #define ANEG_CFG_INVAL 0x00001f06
5083 #define ANEG_TIMER_ENAB 2
5084 #define ANEG_FAILED -1
5086 #define ANEG_STATE_SETTLE_TIME 10000
5088 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5089 struct tg3_fiber_aneginfo *ap)
5092 unsigned long delta;
5096 if (ap->state == ANEG_STATE_UNKNOWN) {
5100 ap->ability_match_cfg = 0;
5101 ap->ability_match_count = 0;
5102 ap->ability_match = 0;
5108 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5109 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5111 if (rx_cfg_reg != ap->ability_match_cfg) {
5112 ap->ability_match_cfg = rx_cfg_reg;
5113 ap->ability_match = 0;
5114 ap->ability_match_count = 0;
5116 if (++ap->ability_match_count > 1) {
5117 ap->ability_match = 1;
5118 ap->ability_match_cfg = rx_cfg_reg;
5121 if (rx_cfg_reg & ANEG_CFG_ACK)
5129 ap->ability_match_cfg = 0;
5130 ap->ability_match_count = 0;
5131 ap->ability_match = 0;
5137 ap->rxconfig = rx_cfg_reg;
5140 switch (ap->state) {
5141 case ANEG_STATE_UNKNOWN:
5142 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5143 ap->state = ANEG_STATE_AN_ENABLE;
5146 case ANEG_STATE_AN_ENABLE:
5147 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5148 if (ap->flags & MR_AN_ENABLE) {
5151 ap->ability_match_cfg = 0;
5152 ap->ability_match_count = 0;
5153 ap->ability_match = 0;
5157 ap->state = ANEG_STATE_RESTART_INIT;
5159 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5163 case ANEG_STATE_RESTART_INIT:
5164 ap->link_time = ap->cur_time;
5165 ap->flags &= ~(MR_NP_LOADED);
5167 tw32(MAC_TX_AUTO_NEG, 0);
5168 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5169 tw32_f(MAC_MODE, tp->mac_mode);
5172 ret = ANEG_TIMER_ENAB;
5173 ap->state = ANEG_STATE_RESTART;
5176 case ANEG_STATE_RESTART:
5177 delta = ap->cur_time - ap->link_time;
5178 if (delta > ANEG_STATE_SETTLE_TIME)
5179 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5181 ret = ANEG_TIMER_ENAB;
5184 case ANEG_STATE_DISABLE_LINK_OK:
5188 case ANEG_STATE_ABILITY_DETECT_INIT:
5189 ap->flags &= ~(MR_TOGGLE_TX);
5190 ap->txconfig = ANEG_CFG_FD;
5191 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5192 if (flowctrl & ADVERTISE_1000XPAUSE)
5193 ap->txconfig |= ANEG_CFG_PS1;
5194 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5195 ap->txconfig |= ANEG_CFG_PS2;
5196 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5197 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5198 tw32_f(MAC_MODE, tp->mac_mode);
5201 ap->state = ANEG_STATE_ABILITY_DETECT;
5204 case ANEG_STATE_ABILITY_DETECT:
5205 if (ap->ability_match != 0 && ap->rxconfig != 0)
5206 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5209 case ANEG_STATE_ACK_DETECT_INIT:
5210 ap->txconfig |= ANEG_CFG_ACK;
5211 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5212 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5213 tw32_f(MAC_MODE, tp->mac_mode);
5216 ap->state = ANEG_STATE_ACK_DETECT;
5219 case ANEG_STATE_ACK_DETECT:
5220 if (ap->ack_match != 0) {
5221 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5222 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5223 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5225 ap->state = ANEG_STATE_AN_ENABLE;
5227 } else if (ap->ability_match != 0 &&
5228 ap->rxconfig == 0) {
5229 ap->state = ANEG_STATE_AN_ENABLE;
5233 case ANEG_STATE_COMPLETE_ACK_INIT:
5234 if (ap->rxconfig & ANEG_CFG_INVAL) {
5238 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5239 MR_LP_ADV_HALF_DUPLEX |
5240 MR_LP_ADV_SYM_PAUSE |
5241 MR_LP_ADV_ASYM_PAUSE |
5242 MR_LP_ADV_REMOTE_FAULT1 |
5243 MR_LP_ADV_REMOTE_FAULT2 |
5244 MR_LP_ADV_NEXT_PAGE |
5247 if (ap->rxconfig & ANEG_CFG_FD)
5248 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5249 if (ap->rxconfig & ANEG_CFG_HD)
5250 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5251 if (ap->rxconfig & ANEG_CFG_PS1)
5252 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5253 if (ap->rxconfig & ANEG_CFG_PS2)
5254 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5255 if (ap->rxconfig & ANEG_CFG_RF1)
5256 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5257 if (ap->rxconfig & ANEG_CFG_RF2)
5258 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5259 if (ap->rxconfig & ANEG_CFG_NP)
5260 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5262 ap->link_time = ap->cur_time;
5264 ap->flags ^= (MR_TOGGLE_TX);
5265 if (ap->rxconfig & 0x0008)
5266 ap->flags |= MR_TOGGLE_RX;
5267 if (ap->rxconfig & ANEG_CFG_NP)
5268 ap->flags |= MR_NP_RX;
5269 ap->flags |= MR_PAGE_RX;
5271 ap->state = ANEG_STATE_COMPLETE_ACK;
5272 ret = ANEG_TIMER_ENAB;
5275 case ANEG_STATE_COMPLETE_ACK:
5276 if (ap->ability_match != 0 &&
5277 ap->rxconfig == 0) {
5278 ap->state = ANEG_STATE_AN_ENABLE;
5281 delta = ap->cur_time - ap->link_time;
5282 if (delta > ANEG_STATE_SETTLE_TIME) {
5283 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5284 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5286 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5287 !(ap->flags & MR_NP_RX)) {
5288 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5296 case ANEG_STATE_IDLE_DETECT_INIT:
5297 ap->link_time = ap->cur_time;
5298 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5299 tw32_f(MAC_MODE, tp->mac_mode);
5302 ap->state = ANEG_STATE_IDLE_DETECT;
5303 ret = ANEG_TIMER_ENAB;
5306 case ANEG_STATE_IDLE_DETECT:
5307 if (ap->ability_match != 0 &&
5308 ap->rxconfig == 0) {
5309 ap->state = ANEG_STATE_AN_ENABLE;
5312 delta = ap->cur_time - ap->link_time;
5313 if (delta > ANEG_STATE_SETTLE_TIME) {
5314 /* XXX another gem from the Broadcom driver :( */
5315 ap->state = ANEG_STATE_LINK_OK;
5319 case ANEG_STATE_LINK_OK:
5320 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5324 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5325 /* ??? unimplemented */
5328 case ANEG_STATE_NEXT_PAGE_WAIT:
5329 /* ??? unimplemented */
5340 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5343 struct tg3_fiber_aneginfo aninfo;
5344 int status = ANEG_FAILED;
5348 tw32_f(MAC_TX_AUTO_NEG, 0);
5350 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5351 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5354 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5357 memset(&aninfo, 0, sizeof(aninfo));
5358 aninfo.flags |= MR_AN_ENABLE;
5359 aninfo.state = ANEG_STATE_UNKNOWN;
5360 aninfo.cur_time = 0;
5362 while (++tick < 195000) {
5363 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5364 if (status == ANEG_DONE || status == ANEG_FAILED)
5370 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5371 tw32_f(MAC_MODE, tp->mac_mode);
5374 *txflags = aninfo.txconfig;
5375 *rxflags = aninfo.flags;
5377 if (status == ANEG_DONE &&
5378 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5379 MR_LP_ADV_FULL_DUPLEX)))
5385 static void tg3_init_bcm8002(struct tg3 *tp)
5387 u32 mac_status = tr32(MAC_STATUS);
5390 /* Reset when initting first time or we have a link. */
5391 if (tg3_flag(tp, INIT_COMPLETE) &&
5392 !(mac_status & MAC_STATUS_PCS_SYNCED))
5395 /* Set PLL lock range. */
5396 tg3_writephy(tp, 0x16, 0x8007);
5399 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5401 /* Wait for reset to complete. */
5402 /* XXX schedule_timeout() ... */
5403 for (i = 0; i < 500; i++)
5406 /* Config mode; select PMA/Ch 1 regs. */
5407 tg3_writephy(tp, 0x10, 0x8411);
5409 /* Enable auto-lock and comdet, select txclk for tx. */
5410 tg3_writephy(tp, 0x11, 0x0a10);
5412 tg3_writephy(tp, 0x18, 0x00a0);
5413 tg3_writephy(tp, 0x16, 0x41ff);
5415 /* Assert and deassert POR. */
5416 tg3_writephy(tp, 0x13, 0x0400);
5418 tg3_writephy(tp, 0x13, 0x0000);
5420 tg3_writephy(tp, 0x11, 0x0a50);
5422 tg3_writephy(tp, 0x11, 0x0a10);
5424 /* Wait for signal to stabilize */
5425 /* XXX schedule_timeout() ... */
5426 for (i = 0; i < 15000; i++)
5429 /* Deselect the channel register so we can read the PHYID
5432 tg3_writephy(tp, 0x10, 0x8011);
5435 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5438 bool current_link_up;
5439 u32 sg_dig_ctrl, sg_dig_status;
5440 u32 serdes_cfg, expected_sg_dig_ctrl;
5441 int workaround, port_a;
5444 expected_sg_dig_ctrl = 0;
5447 current_link_up = false;
5449 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5450 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5452 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5455 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5456 /* preserve bits 20-23 for voltage regulator */
5457 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5460 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5462 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5463 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5465 u32 val = serdes_cfg;
5471 tw32_f(MAC_SERDES_CFG, val);
5474 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5476 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5477 tg3_setup_flow_control(tp, 0, 0);
5478 current_link_up = true;
5483 /* Want auto-negotiation. */
5484 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5486 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5487 if (flowctrl & ADVERTISE_1000XPAUSE)
5488 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5489 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5490 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5492 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5493 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5494 tp->serdes_counter &&
5495 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5496 MAC_STATUS_RCVD_CFG)) ==
5497 MAC_STATUS_PCS_SYNCED)) {
5498 tp->serdes_counter--;
5499 current_link_up = true;
5504 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5505 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5507 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5509 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5510 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5511 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5512 MAC_STATUS_SIGNAL_DET)) {
5513 sg_dig_status = tr32(SG_DIG_STATUS);
5514 mac_status = tr32(MAC_STATUS);
5516 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5517 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5518 u32 local_adv = 0, remote_adv = 0;
5520 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5521 local_adv |= ADVERTISE_1000XPAUSE;
5522 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5523 local_adv |= ADVERTISE_1000XPSE_ASYM;
5525 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5526 remote_adv |= LPA_1000XPAUSE;
5527 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5528 remote_adv |= LPA_1000XPAUSE_ASYM;
5530 tp->link_config.rmt_adv =
5531 mii_adv_to_ethtool_adv_x(remote_adv);
5533 tg3_setup_flow_control(tp, local_adv, remote_adv);
5534 current_link_up = true;
5535 tp->serdes_counter = 0;
5536 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5537 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5538 if (tp->serdes_counter)
5539 tp->serdes_counter--;
5542 u32 val = serdes_cfg;
5549 tw32_f(MAC_SERDES_CFG, val);
5552 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5555 /* Link parallel detection - link is up */
5556 /* only if we have PCS_SYNC and not */
5557 /* receiving config code words */
5558 mac_status = tr32(MAC_STATUS);
5559 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5560 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5561 tg3_setup_flow_control(tp, 0, 0);
5562 current_link_up = true;
5564 TG3_PHYFLG_PARALLEL_DETECT;
5565 tp->serdes_counter =
5566 SERDES_PARALLEL_DET_TIMEOUT;
5568 goto restart_autoneg;
5572 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5573 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5577 return current_link_up;
5580 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5582 bool current_link_up = false;
5584 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5587 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5588 u32 txflags, rxflags;
5591 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5592 u32 local_adv = 0, remote_adv = 0;
5594 if (txflags & ANEG_CFG_PS1)
5595 local_adv |= ADVERTISE_1000XPAUSE;
5596 if (txflags & ANEG_CFG_PS2)
5597 local_adv |= ADVERTISE_1000XPSE_ASYM;
5599 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5600 remote_adv |= LPA_1000XPAUSE;
5601 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5602 remote_adv |= LPA_1000XPAUSE_ASYM;
5604 tp->link_config.rmt_adv =
5605 mii_adv_to_ethtool_adv_x(remote_adv);
5607 tg3_setup_flow_control(tp, local_adv, remote_adv);
5609 current_link_up = true;
5611 for (i = 0; i < 30; i++) {
5614 (MAC_STATUS_SYNC_CHANGED |
5615 MAC_STATUS_CFG_CHANGED));
5617 if ((tr32(MAC_STATUS) &
5618 (MAC_STATUS_SYNC_CHANGED |
5619 MAC_STATUS_CFG_CHANGED)) == 0)
5623 mac_status = tr32(MAC_STATUS);
5624 if (!current_link_up &&
5625 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5626 !(mac_status & MAC_STATUS_RCVD_CFG))
5627 current_link_up = true;
5629 tg3_setup_flow_control(tp, 0, 0);
5631 /* Forcing 1000FD link up. */
5632 current_link_up = true;
5634 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5637 tw32_f(MAC_MODE, tp->mac_mode);
5642 return current_link_up;
5645 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5648 u16 orig_active_speed;
5649 u8 orig_active_duplex;
5651 bool current_link_up;
5654 orig_pause_cfg = tp->link_config.active_flowctrl;
5655 orig_active_speed = tp->link_config.active_speed;
5656 orig_active_duplex = tp->link_config.active_duplex;
5658 if (!tg3_flag(tp, HW_AUTONEG) &&
5660 tg3_flag(tp, INIT_COMPLETE)) {
5661 mac_status = tr32(MAC_STATUS);
5662 mac_status &= (MAC_STATUS_PCS_SYNCED |
5663 MAC_STATUS_SIGNAL_DET |
5664 MAC_STATUS_CFG_CHANGED |
5665 MAC_STATUS_RCVD_CFG);
5666 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5667 MAC_STATUS_SIGNAL_DET)) {
5668 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5669 MAC_STATUS_CFG_CHANGED));
5674 tw32_f(MAC_TX_AUTO_NEG, 0);
5676 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5677 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5678 tw32_f(MAC_MODE, tp->mac_mode);
5681 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5682 tg3_init_bcm8002(tp);
5684 /* Enable link change event even when serdes polling. */
5685 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5688 current_link_up = false;
5689 tp->link_config.rmt_adv = 0;
5690 mac_status = tr32(MAC_STATUS);
5692 if (tg3_flag(tp, HW_AUTONEG))
5693 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5695 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5697 tp->napi[0].hw_status->status =
5698 (SD_STATUS_UPDATED |
5699 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5701 for (i = 0; i < 100; i++) {
5702 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5703 MAC_STATUS_CFG_CHANGED));
5705 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5706 MAC_STATUS_CFG_CHANGED |
5707 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5711 mac_status = tr32(MAC_STATUS);
5712 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5713 current_link_up = false;
5714 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5715 tp->serdes_counter == 0) {
5716 tw32_f(MAC_MODE, (tp->mac_mode |
5717 MAC_MODE_SEND_CONFIGS));
5719 tw32_f(MAC_MODE, tp->mac_mode);
5723 if (current_link_up) {
5724 tp->link_config.active_speed = SPEED_1000;
5725 tp->link_config.active_duplex = DUPLEX_FULL;
5726 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5727 LED_CTRL_LNKLED_OVERRIDE |
5728 LED_CTRL_1000MBPS_ON));
5730 tp->link_config.active_speed = SPEED_UNKNOWN;
5731 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5732 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5733 LED_CTRL_LNKLED_OVERRIDE |
5734 LED_CTRL_TRAFFIC_OVERRIDE));
5737 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5738 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5739 if (orig_pause_cfg != now_pause_cfg ||
5740 orig_active_speed != tp->link_config.active_speed ||
5741 orig_active_duplex != tp->link_config.active_duplex)
5742 tg3_link_report(tp);
5748 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5752 u16 current_speed = SPEED_UNKNOWN;
5753 u8 current_duplex = DUPLEX_UNKNOWN;
5754 bool current_link_up = false;
5755 u32 local_adv, remote_adv, sgsr;
5757 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5758 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5759 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5760 (sgsr & SERDES_TG3_SGMII_MODE)) {
5765 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5767 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5768 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5770 current_link_up = true;
5771 if (sgsr & SERDES_TG3_SPEED_1000) {
5772 current_speed = SPEED_1000;
5773 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5774 } else if (sgsr & SERDES_TG3_SPEED_100) {
5775 current_speed = SPEED_100;
5776 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5778 current_speed = SPEED_10;
5779 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5782 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5783 current_duplex = DUPLEX_FULL;
5785 current_duplex = DUPLEX_HALF;
5788 tw32_f(MAC_MODE, tp->mac_mode);
5791 tg3_clear_mac_status(tp);
5793 goto fiber_setup_done;
5796 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5797 tw32_f(MAC_MODE, tp->mac_mode);
5800 tg3_clear_mac_status(tp);
5805 tp->link_config.rmt_adv = 0;
5807 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5808 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5809 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5810 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5811 bmsr |= BMSR_LSTATUS;
5813 bmsr &= ~BMSR_LSTATUS;
5816 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5818 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5819 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5820 /* do nothing, just check for link up at the end */
5821 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5824 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5825 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5826 ADVERTISE_1000XPAUSE |
5827 ADVERTISE_1000XPSE_ASYM |
5830 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5831 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5833 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5834 tg3_writephy(tp, MII_ADVERTISE, newadv);
5835 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5836 tg3_writephy(tp, MII_BMCR, bmcr);
5838 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5839 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5840 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5847 bmcr &= ~BMCR_SPEED1000;
5848 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5850 if (tp->link_config.duplex == DUPLEX_FULL)
5851 new_bmcr |= BMCR_FULLDPLX;
5853 if (new_bmcr != bmcr) {
5854 /* BMCR_SPEED1000 is a reserved bit that needs
5855 * to be set on write.
5857 new_bmcr |= BMCR_SPEED1000;
5859 /* Force a linkdown */
5863 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5864 adv &= ~(ADVERTISE_1000XFULL |
5865 ADVERTISE_1000XHALF |
5867 tg3_writephy(tp, MII_ADVERTISE, adv);
5868 tg3_writephy(tp, MII_BMCR, bmcr |
5872 tg3_carrier_off(tp);
5874 tg3_writephy(tp, MII_BMCR, new_bmcr);
5876 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5877 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5878 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5879 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5880 bmsr |= BMSR_LSTATUS;
5882 bmsr &= ~BMSR_LSTATUS;
5884 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5888 if (bmsr & BMSR_LSTATUS) {
5889 current_speed = SPEED_1000;
5890 current_link_up = true;
5891 if (bmcr & BMCR_FULLDPLX)
5892 current_duplex = DUPLEX_FULL;
5894 current_duplex = DUPLEX_HALF;
5899 if (bmcr & BMCR_ANENABLE) {
5902 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5903 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5904 common = local_adv & remote_adv;
5905 if (common & (ADVERTISE_1000XHALF |
5906 ADVERTISE_1000XFULL)) {
5907 if (common & ADVERTISE_1000XFULL)
5908 current_duplex = DUPLEX_FULL;
5910 current_duplex = DUPLEX_HALF;
5912 tp->link_config.rmt_adv =
5913 mii_adv_to_ethtool_adv_x(remote_adv);
5914 } else if (!tg3_flag(tp, 5780_CLASS)) {
5915 /* Link is up via parallel detect */
5917 current_link_up = false;
5923 if (current_link_up && current_duplex == DUPLEX_FULL)
5924 tg3_setup_flow_control(tp, local_adv, remote_adv);
5926 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5927 if (tp->link_config.active_duplex == DUPLEX_HALF)
5928 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5930 tw32_f(MAC_MODE, tp->mac_mode);
5933 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5935 tp->link_config.active_speed = current_speed;
5936 tp->link_config.active_duplex = current_duplex;
5938 tg3_test_and_report_link_chg(tp, current_link_up);
5942 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5944 if (tp->serdes_counter) {
5945 /* Give autoneg time to complete. */
5946 tp->serdes_counter--;
5951 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5954 tg3_readphy(tp, MII_BMCR, &bmcr);
5955 if (bmcr & BMCR_ANENABLE) {
5958 /* Select shadow register 0x1f */
5959 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5960 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5962 /* Select expansion interrupt status register */
5963 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5964 MII_TG3_DSP_EXP1_INT_STAT);
5965 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5966 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5968 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5969 /* We have signal detect and not receiving
5970 * config code words, link is up by parallel
5974 bmcr &= ~BMCR_ANENABLE;
5975 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5976 tg3_writephy(tp, MII_BMCR, bmcr);
5977 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5980 } else if (tp->link_up &&
5981 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5982 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5985 /* Select expansion interrupt status register */
5986 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5987 MII_TG3_DSP_EXP1_INT_STAT);
5988 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5992 /* Config code words received, turn on autoneg. */
5993 tg3_readphy(tp, MII_BMCR, &bmcr);
5994 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5996 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6002 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6007 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6008 err = tg3_setup_fiber_phy(tp, force_reset);
6009 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6010 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6012 err = tg3_setup_copper_phy(tp, force_reset);
6014 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6017 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6018 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6020 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6025 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6026 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6027 tw32(GRC_MISC_CFG, val);
6030 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6031 (6 << TX_LENGTHS_IPG_SHIFT);
6032 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6033 tg3_asic_rev(tp) == ASIC_REV_5762)
6034 val |= tr32(MAC_TX_LENGTHS) &
6035 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6036 TX_LENGTHS_CNT_DWN_VAL_MSK);
6038 if (tp->link_config.active_speed == SPEED_1000 &&
6039 tp->link_config.active_duplex == DUPLEX_HALF)
6040 tw32(MAC_TX_LENGTHS, val |
6041 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6043 tw32(MAC_TX_LENGTHS, val |
6044 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6046 if (!tg3_flag(tp, 5705_PLUS)) {
6048 tw32(HOSTCC_STAT_COAL_TICKS,
6049 tp->coal.stats_block_coalesce_usecs);
6051 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6055 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6056 val = tr32(PCIE_PWR_MGMT_THRESH);
6058 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6061 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6062 tw32(PCIE_PWR_MGMT_THRESH, val);
6068 /* tp->lock must be held */
6069 static u64 tg3_refclk_read(struct tg3 *tp)
6071 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6072 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6075 /* tp->lock must be held */
6076 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6078 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
6079 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6080 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6081 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6084 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6085 static inline void tg3_full_unlock(struct tg3 *tp);
6086 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6088 struct tg3 *tp = netdev_priv(dev);
6090 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6091 SOF_TIMESTAMPING_RX_SOFTWARE |
6092 SOF_TIMESTAMPING_SOFTWARE;
6094 if (tg3_flag(tp, PTP_CAPABLE)) {
6095 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6096 SOF_TIMESTAMPING_RX_HARDWARE |
6097 SOF_TIMESTAMPING_RAW_HARDWARE;
6101 info->phc_index = ptp_clock_index(tp->ptp_clock);
6103 info->phc_index = -1;
6105 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6107 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6108 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6109 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6110 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6114 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6116 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6117 bool neg_adj = false;
6125 /* Frequency adjustment is performed using hardware with a 24 bit
6126 * accumulator and a programmable correction value. On each clk, the
6127 * correction value gets added to the accumulator and when it
6128 * overflows, the time counter is incremented/decremented.
6130 * So conversion from ppb to correction value is
6131 * ppb * (1 << 24) / 1000000000
6133 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6134 TG3_EAV_REF_CLK_CORRECT_MASK;
6136 tg3_full_lock(tp, 0);
6139 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6140 TG3_EAV_REF_CLK_CORRECT_EN |
6141 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6143 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6145 tg3_full_unlock(tp);
6150 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6152 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6154 tg3_full_lock(tp, 0);
6155 tp->ptp_adjust += delta;
6156 tg3_full_unlock(tp);
6161 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6165 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6167 tg3_full_lock(tp, 0);
6168 ns = tg3_refclk_read(tp);
6169 ns += tp->ptp_adjust;
6170 tg3_full_unlock(tp);
6172 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6173 ts->tv_nsec = remainder;
6178 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6179 const struct timespec *ts)
6182 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6184 ns = timespec_to_ns(ts);
6186 tg3_full_lock(tp, 0);
6187 tg3_refclk_write(tp, ns);
6189 tg3_full_unlock(tp);
6194 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6195 struct ptp_clock_request *rq, int on)
6200 static const struct ptp_clock_info tg3_ptp_caps = {
6201 .owner = THIS_MODULE,
6202 .name = "tg3 clock",
6203 .max_adj = 250000000,
6208 .adjfreq = tg3_ptp_adjfreq,
6209 .adjtime = tg3_ptp_adjtime,
6210 .gettime = tg3_ptp_gettime,
6211 .settime = tg3_ptp_settime,
6212 .enable = tg3_ptp_enable,
6215 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6216 struct skb_shared_hwtstamps *timestamp)
6218 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6219 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6223 /* tp->lock must be held */
6224 static void tg3_ptp_init(struct tg3 *tp)
6226 if (!tg3_flag(tp, PTP_CAPABLE))
6229 /* Initialize the hardware clock to the system time. */
6230 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6232 tp->ptp_info = tg3_ptp_caps;
6235 /* tp->lock must be held */
6236 static void tg3_ptp_resume(struct tg3 *tp)
6238 if (!tg3_flag(tp, PTP_CAPABLE))
6241 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6245 static void tg3_ptp_fini(struct tg3 *tp)
6247 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6250 ptp_clock_unregister(tp->ptp_clock);
6251 tp->ptp_clock = NULL;
6255 static inline int tg3_irq_sync(struct tg3 *tp)
6257 return tp->irq_sync;
6260 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6264 dst = (u32 *)((u8 *)dst + off);
6265 for (i = 0; i < len; i += sizeof(u32))
6266 *dst++ = tr32(off + i);
6269 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6271 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6272 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6273 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6274 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6275 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6276 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6277 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6278 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6279 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6280 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6281 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6282 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6283 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6284 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6285 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6286 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6287 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6288 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6289 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6291 if (tg3_flag(tp, SUPPORT_MSIX))
6292 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6294 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6295 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6296 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6297 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6298 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6299 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6300 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6301 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6303 if (!tg3_flag(tp, 5705_PLUS)) {
6304 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6305 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6306 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6309 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6310 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6311 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6312 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6313 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6315 if (tg3_flag(tp, NVRAM))
6316 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6319 static void tg3_dump_state(struct tg3 *tp)
6324 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6328 if (tg3_flag(tp, PCI_EXPRESS)) {
6329 /* Read up to but not including private PCI registers */
6330 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6331 regs[i / sizeof(u32)] = tr32(i);
6333 tg3_dump_legacy_regs(tp, regs);
6335 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6336 if (!regs[i + 0] && !regs[i + 1] &&
6337 !regs[i + 2] && !regs[i + 3])
6340 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6342 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6347 for (i = 0; i < tp->irq_cnt; i++) {
6348 struct tg3_napi *tnapi = &tp->napi[i];
6350 /* SW status block */
6352 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6354 tnapi->hw_status->status,
6355 tnapi->hw_status->status_tag,
6356 tnapi->hw_status->rx_jumbo_consumer,
6357 tnapi->hw_status->rx_consumer,
6358 tnapi->hw_status->rx_mini_consumer,
6359 tnapi->hw_status->idx[0].rx_producer,
6360 tnapi->hw_status->idx[0].tx_consumer);
6363 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6365 tnapi->last_tag, tnapi->last_irq_tag,
6366 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6368 tnapi->prodring.rx_std_prod_idx,
6369 tnapi->prodring.rx_std_cons_idx,
6370 tnapi->prodring.rx_jmb_prod_idx,
6371 tnapi->prodring.rx_jmb_cons_idx);
6375 /* This is called whenever we suspect that the system chipset is re-
6376 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6377 * is bogus tx completions. We try to recover by setting the
6378 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6381 static void tg3_tx_recover(struct tg3 *tp)
6383 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6384 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6386 netdev_warn(tp->dev,
6387 "The system may be re-ordering memory-mapped I/O "
6388 "cycles to the network device, attempting to recover. "
6389 "Please report the problem to the driver maintainer "
6390 "and include system chipset information.\n");
6392 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6395 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6397 /* Tell compiler to fetch tx indices from memory. */
6399 return tnapi->tx_pending -
6400 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6403 /* Tigon3 never reports partial packet sends. So we do not
6404 * need special logic to handle SKBs that have not had all
6405 * of their frags sent yet, like SunGEM does.
6407 static void tg3_tx(struct tg3_napi *tnapi)
6409 struct tg3 *tp = tnapi->tp;
6410 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6411 u32 sw_idx = tnapi->tx_cons;
6412 struct netdev_queue *txq;
6413 int index = tnapi - tp->napi;
6414 unsigned int pkts_compl = 0, bytes_compl = 0;
6416 if (tg3_flag(tp, ENABLE_TSS))
6419 txq = netdev_get_tx_queue(tp->dev, index);
6421 while (sw_idx != hw_idx) {
6422 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6423 struct sk_buff *skb = ri->skb;
6426 if (unlikely(skb == NULL)) {
6431 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6432 struct skb_shared_hwtstamps timestamp;
6433 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6434 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6436 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp);
6438 skb_tstamp_tx(skb, ×tamp);
6441 pci_unmap_single(tp->pdev,
6442 dma_unmap_addr(ri, mapping),
6448 while (ri->fragmented) {
6449 ri->fragmented = false;
6450 sw_idx = NEXT_TX(sw_idx);
6451 ri = &tnapi->tx_buffers[sw_idx];
6454 sw_idx = NEXT_TX(sw_idx);
6456 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6457 ri = &tnapi->tx_buffers[sw_idx];
6458 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6461 pci_unmap_page(tp->pdev,
6462 dma_unmap_addr(ri, mapping),
6463 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6466 while (ri->fragmented) {
6467 ri->fragmented = false;
6468 sw_idx = NEXT_TX(sw_idx);
6469 ri = &tnapi->tx_buffers[sw_idx];
6472 sw_idx = NEXT_TX(sw_idx);
6476 bytes_compl += skb->len;
6480 if (unlikely(tx_bug)) {
6486 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6488 tnapi->tx_cons = sw_idx;
6490 /* Need to make the tx_cons update visible to tg3_start_xmit()
6491 * before checking for netif_queue_stopped(). Without the
6492 * memory barrier, there is a small possibility that tg3_start_xmit()
6493 * will miss it and cause the queue to be stopped forever.
6497 if (unlikely(netif_tx_queue_stopped(txq) &&
6498 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6499 __netif_tx_lock(txq, smp_processor_id());
6500 if (netif_tx_queue_stopped(txq) &&
6501 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6502 netif_tx_wake_queue(txq);
6503 __netif_tx_unlock(txq);
6507 static void tg3_frag_free(bool is_frag, void *data)
6510 put_page(virt_to_head_page(data));
6515 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6517 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6518 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6523 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6524 map_sz, PCI_DMA_FROMDEVICE);
6525 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6530 /* Returns size of skb allocated or < 0 on error.
6532 * We only need to fill in the address because the other members
6533 * of the RX descriptor are invariant, see tg3_init_rings.
6535 * Note the purposeful assymetry of cpu vs. chip accesses. For
6536 * posting buffers we only dirty the first cache line of the RX
6537 * descriptor (containing the address). Whereas for the RX status
6538 * buffers the cpu only reads the last cacheline of the RX descriptor
6539 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6541 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6542 u32 opaque_key, u32 dest_idx_unmasked,
6543 unsigned int *frag_size)
6545 struct tg3_rx_buffer_desc *desc;
6546 struct ring_info *map;
6549 int skb_size, data_size, dest_idx;
6551 switch (opaque_key) {
6552 case RXD_OPAQUE_RING_STD:
6553 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6554 desc = &tpr->rx_std[dest_idx];
6555 map = &tpr->rx_std_buffers[dest_idx];
6556 data_size = tp->rx_pkt_map_sz;
6559 case RXD_OPAQUE_RING_JUMBO:
6560 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6561 desc = &tpr->rx_jmb[dest_idx].std;
6562 map = &tpr->rx_jmb_buffers[dest_idx];
6563 data_size = TG3_RX_JMB_MAP_SZ;
6570 /* Do not overwrite any of the map or rp information
6571 * until we are sure we can commit to a new buffer.
6573 * Callers depend upon this behavior and assume that
6574 * we leave everything unchanged if we fail.
6576 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6577 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6578 if (skb_size <= PAGE_SIZE) {
6579 data = netdev_alloc_frag(skb_size);
6580 *frag_size = skb_size;
6582 data = kmalloc(skb_size, GFP_ATOMIC);
6588 mapping = pci_map_single(tp->pdev,
6589 data + TG3_RX_OFFSET(tp),
6591 PCI_DMA_FROMDEVICE);
6592 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6593 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6598 dma_unmap_addr_set(map, mapping, mapping);
6600 desc->addr_hi = ((u64)mapping >> 32);
6601 desc->addr_lo = ((u64)mapping & 0xffffffff);
6606 /* We only need to move over in the address because the other
6607 * members of the RX descriptor are invariant. See notes above
6608 * tg3_alloc_rx_data for full details.
6610 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6611 struct tg3_rx_prodring_set *dpr,
6612 u32 opaque_key, int src_idx,
6613 u32 dest_idx_unmasked)
6615 struct tg3 *tp = tnapi->tp;
6616 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6617 struct ring_info *src_map, *dest_map;
6618 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6621 switch (opaque_key) {
6622 case RXD_OPAQUE_RING_STD:
6623 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6624 dest_desc = &dpr->rx_std[dest_idx];
6625 dest_map = &dpr->rx_std_buffers[dest_idx];
6626 src_desc = &spr->rx_std[src_idx];
6627 src_map = &spr->rx_std_buffers[src_idx];
6630 case RXD_OPAQUE_RING_JUMBO:
6631 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6632 dest_desc = &dpr->rx_jmb[dest_idx].std;
6633 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6634 src_desc = &spr->rx_jmb[src_idx].std;
6635 src_map = &spr->rx_jmb_buffers[src_idx];
6642 dest_map->data = src_map->data;
6643 dma_unmap_addr_set(dest_map, mapping,
6644 dma_unmap_addr(src_map, mapping));
6645 dest_desc->addr_hi = src_desc->addr_hi;
6646 dest_desc->addr_lo = src_desc->addr_lo;
6648 /* Ensure that the update to the skb happens after the physical
6649 * addresses have been transferred to the new BD location.
6653 src_map->data = NULL;
6656 /* The RX ring scheme is composed of multiple rings which post fresh
6657 * buffers to the chip, and one special ring the chip uses to report
6658 * status back to the host.
6660 * The special ring reports the status of received packets to the
6661 * host. The chip does not write into the original descriptor the
6662 * RX buffer was obtained from. The chip simply takes the original
6663 * descriptor as provided by the host, updates the status and length
6664 * field, then writes this into the next status ring entry.
6666 * Each ring the host uses to post buffers to the chip is described
6667 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6668 * it is first placed into the on-chip ram. When the packet's length
6669 * is known, it walks down the TG3_BDINFO entries to select the ring.
6670 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6671 * which is within the range of the new packet's length is chosen.
6673 * The "separate ring for rx status" scheme may sound queer, but it makes
6674 * sense from a cache coherency perspective. If only the host writes
6675 * to the buffer post rings, and only the chip writes to the rx status
6676 * rings, then cache lines never move beyond shared-modified state.
6677 * If both the host and chip were to write into the same ring, cache line
6678 * eviction could occur since both entities want it in an exclusive state.
6680 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6682 struct tg3 *tp = tnapi->tp;
6683 u32 work_mask, rx_std_posted = 0;
6684 u32 std_prod_idx, jmb_prod_idx;
6685 u32 sw_idx = tnapi->rx_rcb_ptr;
6688 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6690 hw_idx = *(tnapi->rx_rcb_prod_idx);
6692 * We need to order the read of hw_idx and the read of
6693 * the opaque cookie.
6698 std_prod_idx = tpr->rx_std_prod_idx;
6699 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6700 while (sw_idx != hw_idx && budget > 0) {
6701 struct ring_info *ri;
6702 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6704 struct sk_buff *skb;
6705 dma_addr_t dma_addr;
6706 u32 opaque_key, desc_idx, *post_ptr;
6710 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6711 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6712 if (opaque_key == RXD_OPAQUE_RING_STD) {
6713 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6714 dma_addr = dma_unmap_addr(ri, mapping);
6716 post_ptr = &std_prod_idx;
6718 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6719 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6720 dma_addr = dma_unmap_addr(ri, mapping);
6722 post_ptr = &jmb_prod_idx;
6724 goto next_pkt_nopost;
6726 work_mask |= opaque_key;
6728 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6729 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6731 tg3_recycle_rx(tnapi, tpr, opaque_key,
6732 desc_idx, *post_ptr);
6734 /* Other statistics kept track of by card. */
6739 prefetch(data + TG3_RX_OFFSET(tp));
6740 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6743 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6744 RXD_FLAG_PTPSTAT_PTPV1 ||
6745 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6746 RXD_FLAG_PTPSTAT_PTPV2) {
6747 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6748 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6751 if (len > TG3_RX_COPY_THRESH(tp)) {
6753 unsigned int frag_size;
6755 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6756 *post_ptr, &frag_size);
6760 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6761 PCI_DMA_FROMDEVICE);
6763 skb = build_skb(data, frag_size);
6765 tg3_frag_free(frag_size != 0, data);
6766 goto drop_it_no_recycle;
6768 skb_reserve(skb, TG3_RX_OFFSET(tp));
6769 /* Ensure that the update to the data happens
6770 * after the usage of the old DMA mapping.
6777 tg3_recycle_rx(tnapi, tpr, opaque_key,
6778 desc_idx, *post_ptr);
6780 skb = netdev_alloc_skb(tp->dev,
6781 len + TG3_RAW_IP_ALIGN);
6783 goto drop_it_no_recycle;
6785 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6786 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6788 data + TG3_RX_OFFSET(tp),
6790 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6795 tg3_hwclock_to_timestamp(tp, tstamp,
6796 skb_hwtstamps(skb));
6798 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6799 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6800 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6801 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6802 skb->ip_summed = CHECKSUM_UNNECESSARY;
6804 skb_checksum_none_assert(skb);
6806 skb->protocol = eth_type_trans(skb, tp->dev);
6808 if (len > (tp->dev->mtu + ETH_HLEN) &&
6809 skb->protocol != htons(ETH_P_8021Q)) {
6811 goto drop_it_no_recycle;
6814 if (desc->type_flags & RXD_FLAG_VLAN &&
6815 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6816 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6817 desc->err_vlan & RXD_VLAN_MASK);
6819 napi_gro_receive(&tnapi->napi, skb);
6827 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6828 tpr->rx_std_prod_idx = std_prod_idx &
6829 tp->rx_std_ring_mask;
6830 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6831 tpr->rx_std_prod_idx);
6832 work_mask &= ~RXD_OPAQUE_RING_STD;
6837 sw_idx &= tp->rx_ret_ring_mask;
6839 /* Refresh hw_idx to see if there is new work */
6840 if (sw_idx == hw_idx) {
6841 hw_idx = *(tnapi->rx_rcb_prod_idx);
6846 /* ACK the status ring. */
6847 tnapi->rx_rcb_ptr = sw_idx;
6848 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6850 /* Refill RX ring(s). */
6851 if (!tg3_flag(tp, ENABLE_RSS)) {
6852 /* Sync BD data before updating mailbox */
6855 if (work_mask & RXD_OPAQUE_RING_STD) {
6856 tpr->rx_std_prod_idx = std_prod_idx &
6857 tp->rx_std_ring_mask;
6858 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6859 tpr->rx_std_prod_idx);
6861 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6862 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6863 tp->rx_jmb_ring_mask;
6864 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6865 tpr->rx_jmb_prod_idx);
6868 } else if (work_mask) {
6869 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6870 * updated before the producer indices can be updated.
6874 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6875 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6877 if (tnapi != &tp->napi[1]) {
6878 tp->rx_refill = true;
6879 napi_schedule(&tp->napi[1].napi);
6886 static void tg3_poll_link(struct tg3 *tp)
6888 /* handle link change and other phy events */
6889 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6890 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6892 if (sblk->status & SD_STATUS_LINK_CHG) {
6893 sblk->status = SD_STATUS_UPDATED |
6894 (sblk->status & ~SD_STATUS_LINK_CHG);
6895 spin_lock(&tp->lock);
6896 if (tg3_flag(tp, USE_PHYLIB)) {
6898 (MAC_STATUS_SYNC_CHANGED |
6899 MAC_STATUS_CFG_CHANGED |
6900 MAC_STATUS_MI_COMPLETION |
6901 MAC_STATUS_LNKSTATE_CHANGED));
6904 tg3_setup_phy(tp, false);
6905 spin_unlock(&tp->lock);
6910 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6911 struct tg3_rx_prodring_set *dpr,
6912 struct tg3_rx_prodring_set *spr)
6914 u32 si, di, cpycnt, src_prod_idx;
6918 src_prod_idx = spr->rx_std_prod_idx;
6920 /* Make sure updates to the rx_std_buffers[] entries and the
6921 * standard producer index are seen in the correct order.
6925 if (spr->rx_std_cons_idx == src_prod_idx)
6928 if (spr->rx_std_cons_idx < src_prod_idx)
6929 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6931 cpycnt = tp->rx_std_ring_mask + 1 -
6932 spr->rx_std_cons_idx;
6934 cpycnt = min(cpycnt,
6935 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6937 si = spr->rx_std_cons_idx;
6938 di = dpr->rx_std_prod_idx;
6940 for (i = di; i < di + cpycnt; i++) {
6941 if (dpr->rx_std_buffers[i].data) {
6951 /* Ensure that updates to the rx_std_buffers ring and the
6952 * shadowed hardware producer ring from tg3_recycle_skb() are
6953 * ordered correctly WRT the skb check above.
6957 memcpy(&dpr->rx_std_buffers[di],
6958 &spr->rx_std_buffers[si],
6959 cpycnt * sizeof(struct ring_info));
6961 for (i = 0; i < cpycnt; i++, di++, si++) {
6962 struct tg3_rx_buffer_desc *sbd, *dbd;
6963 sbd = &spr->rx_std[si];
6964 dbd = &dpr->rx_std[di];
6965 dbd->addr_hi = sbd->addr_hi;
6966 dbd->addr_lo = sbd->addr_lo;
6969 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6970 tp->rx_std_ring_mask;
6971 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6972 tp->rx_std_ring_mask;
6976 src_prod_idx = spr->rx_jmb_prod_idx;
6978 /* Make sure updates to the rx_jmb_buffers[] entries and
6979 * the jumbo producer index are seen in the correct order.
6983 if (spr->rx_jmb_cons_idx == src_prod_idx)
6986 if (spr->rx_jmb_cons_idx < src_prod_idx)
6987 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6989 cpycnt = tp->rx_jmb_ring_mask + 1 -
6990 spr->rx_jmb_cons_idx;
6992 cpycnt = min(cpycnt,
6993 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6995 si = spr->rx_jmb_cons_idx;
6996 di = dpr->rx_jmb_prod_idx;
6998 for (i = di; i < di + cpycnt; i++) {
6999 if (dpr->rx_jmb_buffers[i].data) {
7009 /* Ensure that updates to the rx_jmb_buffers ring and the
7010 * shadowed hardware producer ring from tg3_recycle_skb() are
7011 * ordered correctly WRT the skb check above.
7015 memcpy(&dpr->rx_jmb_buffers[di],
7016 &spr->rx_jmb_buffers[si],
7017 cpycnt * sizeof(struct ring_info));
7019 for (i = 0; i < cpycnt; i++, di++, si++) {
7020 struct tg3_rx_buffer_desc *sbd, *dbd;
7021 sbd = &spr->rx_jmb[si].std;
7022 dbd = &dpr->rx_jmb[di].std;
7023 dbd->addr_hi = sbd->addr_hi;
7024 dbd->addr_lo = sbd->addr_lo;
7027 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7028 tp->rx_jmb_ring_mask;
7029 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7030 tp->rx_jmb_ring_mask;
7036 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7038 struct tg3 *tp = tnapi->tp;
7040 /* run TX completion thread */
7041 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7043 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7047 if (!tnapi->rx_rcb_prod_idx)
7050 /* run RX thread, within the bounds set by NAPI.
7051 * All RX "locking" is done by ensuring outside
7052 * code synchronizes with tg3->napi.poll()
7054 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7055 work_done += tg3_rx(tnapi, budget - work_done);
7057 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7058 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7060 u32 std_prod_idx = dpr->rx_std_prod_idx;
7061 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7063 tp->rx_refill = false;
7064 for (i = 1; i <= tp->rxq_cnt; i++)
7065 err |= tg3_rx_prodring_xfer(tp, dpr,
7066 &tp->napi[i].prodring);
7070 if (std_prod_idx != dpr->rx_std_prod_idx)
7071 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7072 dpr->rx_std_prod_idx);
7074 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7075 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7076 dpr->rx_jmb_prod_idx);
7081 tw32_f(HOSTCC_MODE, tp->coal_now);
7087 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7089 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7090 schedule_work(&tp->reset_task);
7093 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7095 cancel_work_sync(&tp->reset_task);
7096 tg3_flag_clear(tp, RESET_TASK_PENDING);
7097 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7100 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7102 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7103 struct tg3 *tp = tnapi->tp;
7105 struct tg3_hw_status *sblk = tnapi->hw_status;
7108 work_done = tg3_poll_work(tnapi, work_done, budget);
7110 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7113 if (unlikely(work_done >= budget))
7116 /* tp->last_tag is used in tg3_int_reenable() below
7117 * to tell the hw how much work has been processed,
7118 * so we must read it before checking for more work.
7120 tnapi->last_tag = sblk->status_tag;
7121 tnapi->last_irq_tag = tnapi->last_tag;
7124 /* check for RX/TX work to do */
7125 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7126 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7128 /* This test here is not race free, but will reduce
7129 * the number of interrupts by looping again.
7131 if (tnapi == &tp->napi[1] && tp->rx_refill)
7134 napi_complete(napi);
7135 /* Reenable interrupts. */
7136 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7138 /* This test here is synchronized by napi_schedule()
7139 * and napi_complete() to close the race condition.
7141 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7142 tw32(HOSTCC_MODE, tp->coalesce_mode |
7143 HOSTCC_MODE_ENABLE |
7154 /* work_done is guaranteed to be less than budget. */
7155 napi_complete(napi);
7156 tg3_reset_task_schedule(tp);
7160 static void tg3_process_error(struct tg3 *tp)
7163 bool real_error = false;
7165 if (tg3_flag(tp, ERROR_PROCESSED))
7168 /* Check Flow Attention register */
7169 val = tr32(HOSTCC_FLOW_ATTN);
7170 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7171 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7175 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7176 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7180 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7181 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7190 tg3_flag_set(tp, ERROR_PROCESSED);
7191 tg3_reset_task_schedule(tp);
7194 static int tg3_poll(struct napi_struct *napi, int budget)
7196 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7197 struct tg3 *tp = tnapi->tp;
7199 struct tg3_hw_status *sblk = tnapi->hw_status;
7202 if (sblk->status & SD_STATUS_ERROR)
7203 tg3_process_error(tp);
7207 work_done = tg3_poll_work(tnapi, work_done, budget);
7209 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7212 if (unlikely(work_done >= budget))
7215 if (tg3_flag(tp, TAGGED_STATUS)) {
7216 /* tp->last_tag is used in tg3_int_reenable() below
7217 * to tell the hw how much work has been processed,
7218 * so we must read it before checking for more work.
7220 tnapi->last_tag = sblk->status_tag;
7221 tnapi->last_irq_tag = tnapi->last_tag;
7224 sblk->status &= ~SD_STATUS_UPDATED;
7226 if (likely(!tg3_has_work(tnapi))) {
7227 napi_complete(napi);
7228 tg3_int_reenable(tnapi);
7236 /* work_done is guaranteed to be less than budget. */
7237 napi_complete(napi);
7238 tg3_reset_task_schedule(tp);
7242 static void tg3_napi_disable(struct tg3 *tp)
7246 for (i = tp->irq_cnt - 1; i >= 0; i--)
7247 napi_disable(&tp->napi[i].napi);
7250 static void tg3_napi_enable(struct tg3 *tp)
7254 for (i = 0; i < tp->irq_cnt; i++)
7255 napi_enable(&tp->napi[i].napi);
7258 static void tg3_napi_init(struct tg3 *tp)
7262 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7263 for (i = 1; i < tp->irq_cnt; i++)
7264 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7267 static void tg3_napi_fini(struct tg3 *tp)
7271 for (i = 0; i < tp->irq_cnt; i++)
7272 netif_napi_del(&tp->napi[i].napi);
7275 static inline void tg3_netif_stop(struct tg3 *tp)
7277 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7278 tg3_napi_disable(tp);
7279 netif_carrier_off(tp->dev);
7280 netif_tx_disable(tp->dev);
7283 /* tp->lock must be held */
7284 static inline void tg3_netif_start(struct tg3 *tp)
7288 /* NOTE: unconditional netif_tx_wake_all_queues is only
7289 * appropriate so long as all callers are assured to
7290 * have free tx slots (such as after tg3_init_hw)
7292 netif_tx_wake_all_queues(tp->dev);
7295 netif_carrier_on(tp->dev);
7297 tg3_napi_enable(tp);
7298 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7299 tg3_enable_ints(tp);
7302 static void tg3_irq_quiesce(struct tg3 *tp)
7306 BUG_ON(tp->irq_sync);
7311 for (i = 0; i < tp->irq_cnt; i++)
7312 synchronize_irq(tp->napi[i].irq_vec);
7315 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7316 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7317 * with as well. Most of the time, this is not necessary except when
7318 * shutting down the device.
7320 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7322 spin_lock_bh(&tp->lock);
7324 tg3_irq_quiesce(tp);
7327 static inline void tg3_full_unlock(struct tg3 *tp)
7329 spin_unlock_bh(&tp->lock);
7332 /* One-shot MSI handler - Chip automatically disables interrupt
7333 * after sending MSI so driver doesn't have to do it.
7335 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7337 struct tg3_napi *tnapi = dev_id;
7338 struct tg3 *tp = tnapi->tp;
7340 prefetch(tnapi->hw_status);
7342 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7344 if (likely(!tg3_irq_sync(tp)))
7345 napi_schedule(&tnapi->napi);
7350 /* MSI ISR - No need to check for interrupt sharing and no need to
7351 * flush status block and interrupt mailbox. PCI ordering rules
7352 * guarantee that MSI will arrive after the status block.
7354 static irqreturn_t tg3_msi(int irq, void *dev_id)
7356 struct tg3_napi *tnapi = dev_id;
7357 struct tg3 *tp = tnapi->tp;
7359 prefetch(tnapi->hw_status);
7361 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7363 * Writing any value to intr-mbox-0 clears PCI INTA# and
7364 * chip-internal interrupt pending events.
7365 * Writing non-zero to intr-mbox-0 additional tells the
7366 * NIC to stop sending us irqs, engaging "in-intr-handler"
7369 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7370 if (likely(!tg3_irq_sync(tp)))
7371 napi_schedule(&tnapi->napi);
7373 return IRQ_RETVAL(1);
7376 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7378 struct tg3_napi *tnapi = dev_id;
7379 struct tg3 *tp = tnapi->tp;
7380 struct tg3_hw_status *sblk = tnapi->hw_status;
7381 unsigned int handled = 1;
7383 /* In INTx mode, it is possible for the interrupt to arrive at
7384 * the CPU before the status block posted prior to the interrupt.
7385 * Reading the PCI State register will confirm whether the
7386 * interrupt is ours and will flush the status block.
7388 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7389 if (tg3_flag(tp, CHIP_RESETTING) ||
7390 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7397 * Writing any value to intr-mbox-0 clears PCI INTA# and
7398 * chip-internal interrupt pending events.
7399 * Writing non-zero to intr-mbox-0 additional tells the
7400 * NIC to stop sending us irqs, engaging "in-intr-handler"
7403 * Flush the mailbox to de-assert the IRQ immediately to prevent
7404 * spurious interrupts. The flush impacts performance but
7405 * excessive spurious interrupts can be worse in some cases.
7407 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7408 if (tg3_irq_sync(tp))
7410 sblk->status &= ~SD_STATUS_UPDATED;
7411 if (likely(tg3_has_work(tnapi))) {
7412 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7413 napi_schedule(&tnapi->napi);
7415 /* No work, shared interrupt perhaps? re-enable
7416 * interrupts, and flush that PCI write
7418 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7422 return IRQ_RETVAL(handled);
7425 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7427 struct tg3_napi *tnapi = dev_id;
7428 struct tg3 *tp = tnapi->tp;
7429 struct tg3_hw_status *sblk = tnapi->hw_status;
7430 unsigned int handled = 1;
7432 /* In INTx mode, it is possible for the interrupt to arrive at
7433 * the CPU before the status block posted prior to the interrupt.
7434 * Reading the PCI State register will confirm whether the
7435 * interrupt is ours and will flush the status block.
7437 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7438 if (tg3_flag(tp, CHIP_RESETTING) ||
7439 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7446 * writing any value to intr-mbox-0 clears PCI INTA# and
7447 * chip-internal interrupt pending events.
7448 * writing non-zero to intr-mbox-0 additional tells the
7449 * NIC to stop sending us irqs, engaging "in-intr-handler"
7452 * Flush the mailbox to de-assert the IRQ immediately to prevent
7453 * spurious interrupts. The flush impacts performance but
7454 * excessive spurious interrupts can be worse in some cases.
7456 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7459 * In a shared interrupt configuration, sometimes other devices'
7460 * interrupts will scream. We record the current status tag here
7461 * so that the above check can report that the screaming interrupts
7462 * are unhandled. Eventually they will be silenced.
7464 tnapi->last_irq_tag = sblk->status_tag;
7466 if (tg3_irq_sync(tp))
7469 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7471 napi_schedule(&tnapi->napi);
7474 return IRQ_RETVAL(handled);
7477 /* ISR for interrupt test */
7478 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7480 struct tg3_napi *tnapi = dev_id;
7481 struct tg3 *tp = tnapi->tp;
7482 struct tg3_hw_status *sblk = tnapi->hw_status;
7484 if ((sblk->status & SD_STATUS_UPDATED) ||
7485 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7486 tg3_disable_ints(tp);
7487 return IRQ_RETVAL(1);
7489 return IRQ_RETVAL(0);
7492 #ifdef CONFIG_NET_POLL_CONTROLLER
7493 static void tg3_poll_controller(struct net_device *dev)
7496 struct tg3 *tp = netdev_priv(dev);
7498 if (tg3_irq_sync(tp))
7501 for (i = 0; i < tp->irq_cnt; i++)
7502 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7506 static void tg3_tx_timeout(struct net_device *dev)
7508 struct tg3 *tp = netdev_priv(dev);
7510 if (netif_msg_tx_err(tp)) {
7511 netdev_err(dev, "transmit timed out, resetting\n");
7515 tg3_reset_task_schedule(tp);
7518 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7519 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7521 u32 base = (u32) mapping & 0xffffffff;
7523 return (base > 0xffffdcc0) && (base + len + 8 < base);
7526 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7527 * of any 4GB boundaries: 4G, 8G, etc
7529 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7532 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7533 u32 base = (u32) mapping & 0xffffffff;
7535 return ((base + len + (mss & 0x3fff)) < base);
7540 /* Test for DMA addresses > 40-bit */
7541 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7544 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7545 if (tg3_flag(tp, 40BIT_DMA_BUG))
7546 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7553 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7554 dma_addr_t mapping, u32 len, u32 flags,
7557 txbd->addr_hi = ((u64) mapping >> 32);
7558 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7559 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7560 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7563 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7564 dma_addr_t map, u32 len, u32 flags,
7567 struct tg3 *tp = tnapi->tp;
7570 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7573 if (tg3_4g_overflow_test(map, len))
7576 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7579 if (tg3_40bit_overflow_test(tp, map, len))
7582 if (tp->dma_limit) {
7583 u32 prvidx = *entry;
7584 u32 tmp_flag = flags & ~TXD_FLAG_END;
7585 while (len > tp->dma_limit && *budget) {
7586 u32 frag_len = tp->dma_limit;
7587 len -= tp->dma_limit;
7589 /* Avoid the 8byte DMA problem */
7591 len += tp->dma_limit / 2;
7592 frag_len = tp->dma_limit / 2;
7595 tnapi->tx_buffers[*entry].fragmented = true;
7597 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7598 frag_len, tmp_flag, mss, vlan);
7601 *entry = NEXT_TX(*entry);
7608 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7609 len, flags, mss, vlan);
7611 *entry = NEXT_TX(*entry);
7614 tnapi->tx_buffers[prvidx].fragmented = false;
7618 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7619 len, flags, mss, vlan);
7620 *entry = NEXT_TX(*entry);
7626 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7629 struct sk_buff *skb;
7630 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7635 pci_unmap_single(tnapi->tp->pdev,
7636 dma_unmap_addr(txb, mapping),
7640 while (txb->fragmented) {
7641 txb->fragmented = false;
7642 entry = NEXT_TX(entry);
7643 txb = &tnapi->tx_buffers[entry];
7646 for (i = 0; i <= last; i++) {
7647 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7649 entry = NEXT_TX(entry);
7650 txb = &tnapi->tx_buffers[entry];
7652 pci_unmap_page(tnapi->tp->pdev,
7653 dma_unmap_addr(txb, mapping),
7654 skb_frag_size(frag), PCI_DMA_TODEVICE);
7656 while (txb->fragmented) {
7657 txb->fragmented = false;
7658 entry = NEXT_TX(entry);
7659 txb = &tnapi->tx_buffers[entry];
7664 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7665 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7666 struct sk_buff **pskb,
7667 u32 *entry, u32 *budget,
7668 u32 base_flags, u32 mss, u32 vlan)
7670 struct tg3 *tp = tnapi->tp;
7671 struct sk_buff *new_skb, *skb = *pskb;
7672 dma_addr_t new_addr = 0;
7675 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7676 new_skb = skb_copy(skb, GFP_ATOMIC);
7678 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7680 new_skb = skb_copy_expand(skb,
7681 skb_headroom(skb) + more_headroom,
7682 skb_tailroom(skb), GFP_ATOMIC);
7688 /* New SKB is guaranteed to be linear. */
7689 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7691 /* Make sure the mapping succeeded */
7692 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7693 dev_kfree_skb(new_skb);
7696 u32 save_entry = *entry;
7698 base_flags |= TXD_FLAG_END;
7700 tnapi->tx_buffers[*entry].skb = new_skb;
7701 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7704 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7705 new_skb->len, base_flags,
7707 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7708 dev_kfree_skb(new_skb);
7719 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7721 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7722 * TSO header is greater than 80 bytes.
7724 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7726 struct sk_buff *segs, *nskb;
7727 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7729 /* Estimate the number of fragments in the worst case */
7730 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7731 netif_stop_queue(tp->dev);
7733 /* netif_tx_stop_queue() must be done before checking
7734 * checking tx index in tg3_tx_avail() below, because in
7735 * tg3_tx(), we update tx index before checking for
7736 * netif_tx_queue_stopped().
7739 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7740 return NETDEV_TX_BUSY;
7742 netif_wake_queue(tp->dev);
7745 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7747 goto tg3_tso_bug_end;
7753 tg3_start_xmit(nskb, tp->dev);
7759 return NETDEV_TX_OK;
7762 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7763 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7765 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7767 struct tg3 *tp = netdev_priv(dev);
7768 u32 len, entry, base_flags, mss, vlan = 0;
7770 int i = -1, would_hit_hwbug;
7772 struct tg3_napi *tnapi;
7773 struct netdev_queue *txq;
7776 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7777 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7778 if (tg3_flag(tp, ENABLE_TSS))
7781 budget = tg3_tx_avail(tnapi);
7783 /* We are running in BH disabled context with netif_tx_lock
7784 * and TX reclaim runs via tp->napi.poll inside of a software
7785 * interrupt. Furthermore, IRQ processing runs lockless so we have
7786 * no IRQ context deadlocks to worry about either. Rejoice!
7788 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7789 if (!netif_tx_queue_stopped(txq)) {
7790 netif_tx_stop_queue(txq);
7792 /* This is a hard error, log it. */
7794 "BUG! Tx Ring full when queue awake!\n");
7796 return NETDEV_TX_BUSY;
7799 entry = tnapi->tx_prod;
7801 if (skb->ip_summed == CHECKSUM_PARTIAL)
7802 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7804 mss = skb_shinfo(skb)->gso_size;
7807 u32 tcp_opt_len, hdr_len;
7809 if (skb_header_cloned(skb) &&
7810 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7814 tcp_opt_len = tcp_optlen(skb);
7816 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7818 if (!skb_is_gso_v6(skb)) {
7820 iph->tot_len = htons(mss + hdr_len);
7823 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7824 tg3_flag(tp, TSO_BUG))
7825 return tg3_tso_bug(tp, skb);
7827 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7828 TXD_FLAG_CPU_POST_DMA);
7830 if (tg3_flag(tp, HW_TSO_1) ||
7831 tg3_flag(tp, HW_TSO_2) ||
7832 tg3_flag(tp, HW_TSO_3)) {
7833 tcp_hdr(skb)->check = 0;
7834 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7836 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7841 if (tg3_flag(tp, HW_TSO_3)) {
7842 mss |= (hdr_len & 0xc) << 12;
7844 base_flags |= 0x00000010;
7845 base_flags |= (hdr_len & 0x3e0) << 5;
7846 } else if (tg3_flag(tp, HW_TSO_2))
7847 mss |= hdr_len << 9;
7848 else if (tg3_flag(tp, HW_TSO_1) ||
7849 tg3_asic_rev(tp) == ASIC_REV_5705) {
7850 if (tcp_opt_len || iph->ihl > 5) {
7853 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7854 mss |= (tsflags << 11);
7857 if (tcp_opt_len || iph->ihl > 5) {
7860 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7861 base_flags |= tsflags << 12;
7866 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7867 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7868 base_flags |= TXD_FLAG_JMB_PKT;
7870 if (vlan_tx_tag_present(skb)) {
7871 base_flags |= TXD_FLAG_VLAN;
7872 vlan = vlan_tx_tag_get(skb);
7875 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7876 tg3_flag(tp, TX_TSTAMP_EN)) {
7877 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7878 base_flags |= TXD_FLAG_HWTSTAMP;
7881 len = skb_headlen(skb);
7883 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7884 if (pci_dma_mapping_error(tp->pdev, mapping))
7888 tnapi->tx_buffers[entry].skb = skb;
7889 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7891 would_hit_hwbug = 0;
7893 if (tg3_flag(tp, 5701_DMA_BUG))
7894 would_hit_hwbug = 1;
7896 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7897 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7899 would_hit_hwbug = 1;
7900 } else if (skb_shinfo(skb)->nr_frags > 0) {
7903 if (!tg3_flag(tp, HW_TSO_1) &&
7904 !tg3_flag(tp, HW_TSO_2) &&
7905 !tg3_flag(tp, HW_TSO_3))
7908 /* Now loop through additional data
7909 * fragments, and queue them.
7911 last = skb_shinfo(skb)->nr_frags - 1;
7912 for (i = 0; i <= last; i++) {
7913 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7915 len = skb_frag_size(frag);
7916 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7917 len, DMA_TO_DEVICE);
7919 tnapi->tx_buffers[entry].skb = NULL;
7920 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7922 if (dma_mapping_error(&tp->pdev->dev, mapping))
7926 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7928 ((i == last) ? TXD_FLAG_END : 0),
7930 would_hit_hwbug = 1;
7936 if (would_hit_hwbug) {
7937 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7939 /* If the workaround fails due to memory/mapping
7940 * failure, silently drop this packet.
7942 entry = tnapi->tx_prod;
7943 budget = tg3_tx_avail(tnapi);
7944 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7945 base_flags, mss, vlan))
7949 skb_tx_timestamp(skb);
7950 netdev_tx_sent_queue(txq, skb->len);
7952 /* Sync BD data before updating mailbox */
7955 /* Packets are ready, update Tx producer idx local and on card. */
7956 tw32_tx_mbox(tnapi->prodmbox, entry);
7958 tnapi->tx_prod = entry;
7959 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7960 netif_tx_stop_queue(txq);
7962 /* netif_tx_stop_queue() must be done before checking
7963 * checking tx index in tg3_tx_avail() below, because in
7964 * tg3_tx(), we update tx index before checking for
7965 * netif_tx_queue_stopped().
7968 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7969 netif_tx_wake_queue(txq);
7973 return NETDEV_TX_OK;
7976 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7977 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7982 return NETDEV_TX_OK;
7985 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7988 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7989 MAC_MODE_PORT_MODE_MASK);
7991 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7993 if (!tg3_flag(tp, 5705_PLUS))
7994 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7996 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7997 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7999 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8001 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8003 if (tg3_flag(tp, 5705_PLUS) ||
8004 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8005 tg3_asic_rev(tp) == ASIC_REV_5700)
8006 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8009 tw32(MAC_MODE, tp->mac_mode);
8013 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8015 u32 val, bmcr, mac_mode, ptest = 0;
8017 tg3_phy_toggle_apd(tp, false);
8018 tg3_phy_toggle_automdix(tp, false);
8020 if (extlpbk && tg3_phy_set_extloopbk(tp))
8023 bmcr = BMCR_FULLDPLX;
8028 bmcr |= BMCR_SPEED100;
8032 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8034 bmcr |= BMCR_SPEED100;
8037 bmcr |= BMCR_SPEED1000;
8042 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8043 tg3_readphy(tp, MII_CTRL1000, &val);
8044 val |= CTL1000_AS_MASTER |
8045 CTL1000_ENABLE_MASTER;
8046 tg3_writephy(tp, MII_CTRL1000, val);
8048 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8049 MII_TG3_FET_PTEST_TRIM_2;
8050 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8053 bmcr |= BMCR_LOOPBACK;
8055 tg3_writephy(tp, MII_BMCR, bmcr);
8057 /* The write needs to be flushed for the FETs */
8058 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8059 tg3_readphy(tp, MII_BMCR, &bmcr);
8063 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8064 tg3_asic_rev(tp) == ASIC_REV_5785) {
8065 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8066 MII_TG3_FET_PTEST_FRC_TX_LINK |
8067 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8069 /* The write needs to be flushed for the AC131 */
8070 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8073 /* Reset to prevent losing 1st rx packet intermittently */
8074 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8075 tg3_flag(tp, 5780_CLASS)) {
8076 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8078 tw32_f(MAC_RX_MODE, tp->rx_mode);
8081 mac_mode = tp->mac_mode &
8082 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8083 if (speed == SPEED_1000)
8084 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8086 mac_mode |= MAC_MODE_PORT_MODE_MII;
8088 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8089 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8091 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8092 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8093 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8094 mac_mode |= MAC_MODE_LINK_POLARITY;
8096 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8097 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8100 tw32(MAC_MODE, mac_mode);
8106 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8108 struct tg3 *tp = netdev_priv(dev);
8110 if (features & NETIF_F_LOOPBACK) {
8111 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8114 spin_lock_bh(&tp->lock);
8115 tg3_mac_loopback(tp, true);
8116 netif_carrier_on(tp->dev);
8117 spin_unlock_bh(&tp->lock);
8118 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8120 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8123 spin_lock_bh(&tp->lock);
8124 tg3_mac_loopback(tp, false);
8125 /* Force link status check */
8126 tg3_setup_phy(tp, true);
8127 spin_unlock_bh(&tp->lock);
8128 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8132 static netdev_features_t tg3_fix_features(struct net_device *dev,
8133 netdev_features_t features)
8135 struct tg3 *tp = netdev_priv(dev);
8137 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8138 features &= ~NETIF_F_ALL_TSO;
8143 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8145 netdev_features_t changed = dev->features ^ features;
8147 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8148 tg3_set_loopback(dev, features);
8153 static void tg3_rx_prodring_free(struct tg3 *tp,
8154 struct tg3_rx_prodring_set *tpr)
8158 if (tpr != &tp->napi[0].prodring) {
8159 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8160 i = (i + 1) & tp->rx_std_ring_mask)
8161 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8164 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8165 for (i = tpr->rx_jmb_cons_idx;
8166 i != tpr->rx_jmb_prod_idx;
8167 i = (i + 1) & tp->rx_jmb_ring_mask) {
8168 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8176 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8177 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8180 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8181 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8182 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8187 /* Initialize rx rings for packet processing.
8189 * The chip has been shut down and the driver detached from
8190 * the networking, so no interrupts or new tx packets will
8191 * end up in the driver. tp->{tx,}lock are held and thus
8194 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8195 struct tg3_rx_prodring_set *tpr)
8197 u32 i, rx_pkt_dma_sz;
8199 tpr->rx_std_cons_idx = 0;
8200 tpr->rx_std_prod_idx = 0;
8201 tpr->rx_jmb_cons_idx = 0;
8202 tpr->rx_jmb_prod_idx = 0;
8204 if (tpr != &tp->napi[0].prodring) {
8205 memset(&tpr->rx_std_buffers[0], 0,
8206 TG3_RX_STD_BUFF_RING_SIZE(tp));
8207 if (tpr->rx_jmb_buffers)
8208 memset(&tpr->rx_jmb_buffers[0], 0,
8209 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8213 /* Zero out all descriptors. */
8214 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8216 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8217 if (tg3_flag(tp, 5780_CLASS) &&
8218 tp->dev->mtu > ETH_DATA_LEN)
8219 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8220 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8222 /* Initialize invariants of the rings, we only set this
8223 * stuff once. This works because the card does not
8224 * write into the rx buffer posting rings.
8226 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8227 struct tg3_rx_buffer_desc *rxd;
8229 rxd = &tpr->rx_std[i];
8230 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8231 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8232 rxd->opaque = (RXD_OPAQUE_RING_STD |
8233 (i << RXD_OPAQUE_INDEX_SHIFT));
8236 /* Now allocate fresh SKBs for each rx ring. */
8237 for (i = 0; i < tp->rx_pending; i++) {
8238 unsigned int frag_size;
8240 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8242 netdev_warn(tp->dev,
8243 "Using a smaller RX standard ring. Only "
8244 "%d out of %d buffers were allocated "
8245 "successfully\n", i, tp->rx_pending);
8253 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8256 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8258 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8261 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8262 struct tg3_rx_buffer_desc *rxd;
8264 rxd = &tpr->rx_jmb[i].std;
8265 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8266 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8268 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8269 (i << RXD_OPAQUE_INDEX_SHIFT));
8272 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8273 unsigned int frag_size;
8275 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8277 netdev_warn(tp->dev,
8278 "Using a smaller RX jumbo ring. Only %d "
8279 "out of %d buffers were allocated "
8280 "successfully\n", i, tp->rx_jumbo_pending);
8283 tp->rx_jumbo_pending = i;
8292 tg3_rx_prodring_free(tp, tpr);
8296 static void tg3_rx_prodring_fini(struct tg3 *tp,
8297 struct tg3_rx_prodring_set *tpr)
8299 kfree(tpr->rx_std_buffers);
8300 tpr->rx_std_buffers = NULL;
8301 kfree(tpr->rx_jmb_buffers);
8302 tpr->rx_jmb_buffers = NULL;
8304 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8305 tpr->rx_std, tpr->rx_std_mapping);
8309 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8310 tpr->rx_jmb, tpr->rx_jmb_mapping);
8315 static int tg3_rx_prodring_init(struct tg3 *tp,
8316 struct tg3_rx_prodring_set *tpr)
8318 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8320 if (!tpr->rx_std_buffers)
8323 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8324 TG3_RX_STD_RING_BYTES(tp),
8325 &tpr->rx_std_mapping,
8330 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8331 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8333 if (!tpr->rx_jmb_buffers)
8336 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8337 TG3_RX_JMB_RING_BYTES(tp),
8338 &tpr->rx_jmb_mapping,
8347 tg3_rx_prodring_fini(tp, tpr);
8351 /* Free up pending packets in all rx/tx rings.
8353 * The chip has been shut down and the driver detached from
8354 * the networking, so no interrupts or new tx packets will
8355 * end up in the driver. tp->{tx,}lock is not held and we are not
8356 * in an interrupt context and thus may sleep.
8358 static void tg3_free_rings(struct tg3 *tp)
8362 for (j = 0; j < tp->irq_cnt; j++) {
8363 struct tg3_napi *tnapi = &tp->napi[j];
8365 tg3_rx_prodring_free(tp, &tnapi->prodring);
8367 if (!tnapi->tx_buffers)
8370 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8371 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8376 tg3_tx_skb_unmap(tnapi, i,
8377 skb_shinfo(skb)->nr_frags - 1);
8379 dev_kfree_skb_any(skb);
8381 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8385 /* Initialize tx/rx rings for packet processing.
8387 * The chip has been shut down and the driver detached from
8388 * the networking, so no interrupts or new tx packets will
8389 * end up in the driver. tp->{tx,}lock are held and thus
8392 static int tg3_init_rings(struct tg3 *tp)
8396 /* Free up all the SKBs. */
8399 for (i = 0; i < tp->irq_cnt; i++) {
8400 struct tg3_napi *tnapi = &tp->napi[i];
8402 tnapi->last_tag = 0;
8403 tnapi->last_irq_tag = 0;
8404 tnapi->hw_status->status = 0;
8405 tnapi->hw_status->status_tag = 0;
8406 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8411 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8413 tnapi->rx_rcb_ptr = 0;
8415 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8417 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8426 static void tg3_mem_tx_release(struct tg3 *tp)
8430 for (i = 0; i < tp->irq_max; i++) {
8431 struct tg3_napi *tnapi = &tp->napi[i];
8433 if (tnapi->tx_ring) {
8434 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8435 tnapi->tx_ring, tnapi->tx_desc_mapping);
8436 tnapi->tx_ring = NULL;
8439 kfree(tnapi->tx_buffers);
8440 tnapi->tx_buffers = NULL;
8444 static int tg3_mem_tx_acquire(struct tg3 *tp)
8447 struct tg3_napi *tnapi = &tp->napi[0];
8449 /* If multivector TSS is enabled, vector 0 does not handle
8450 * tx interrupts. Don't allocate any resources for it.
8452 if (tg3_flag(tp, ENABLE_TSS))
8455 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8456 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8457 TG3_TX_RING_SIZE, GFP_KERNEL);
8458 if (!tnapi->tx_buffers)
8461 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8463 &tnapi->tx_desc_mapping,
8465 if (!tnapi->tx_ring)
8472 tg3_mem_tx_release(tp);
8476 static void tg3_mem_rx_release(struct tg3 *tp)
8480 for (i = 0; i < tp->irq_max; i++) {
8481 struct tg3_napi *tnapi = &tp->napi[i];
8483 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8488 dma_free_coherent(&tp->pdev->dev,
8489 TG3_RX_RCB_RING_BYTES(tp),
8491 tnapi->rx_rcb_mapping);
8492 tnapi->rx_rcb = NULL;
8496 static int tg3_mem_rx_acquire(struct tg3 *tp)
8498 unsigned int i, limit;
8500 limit = tp->rxq_cnt;
8502 /* If RSS is enabled, we need a (dummy) producer ring
8503 * set on vector zero. This is the true hw prodring.
8505 if (tg3_flag(tp, ENABLE_RSS))
8508 for (i = 0; i < limit; i++) {
8509 struct tg3_napi *tnapi = &tp->napi[i];
8511 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8514 /* If multivector RSS is enabled, vector 0
8515 * does not handle rx or tx interrupts.
8516 * Don't allocate any resources for it.
8518 if (!i && tg3_flag(tp, ENABLE_RSS))
8521 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8522 TG3_RX_RCB_RING_BYTES(tp),
8523 &tnapi->rx_rcb_mapping,
8524 GFP_KERNEL | __GFP_ZERO);
8532 tg3_mem_rx_release(tp);
8537 * Must not be invoked with interrupt sources disabled and
8538 * the hardware shutdown down.
8540 static void tg3_free_consistent(struct tg3 *tp)
8544 for (i = 0; i < tp->irq_cnt; i++) {
8545 struct tg3_napi *tnapi = &tp->napi[i];
8547 if (tnapi->hw_status) {
8548 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8550 tnapi->status_mapping);
8551 tnapi->hw_status = NULL;
8555 tg3_mem_rx_release(tp);
8556 tg3_mem_tx_release(tp);
8559 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8560 tp->hw_stats, tp->stats_mapping);
8561 tp->hw_stats = NULL;
8566 * Must not be invoked with interrupt sources disabled and
8567 * the hardware shutdown down. Can sleep.
8569 static int tg3_alloc_consistent(struct tg3 *tp)
8573 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8574 sizeof(struct tg3_hw_stats),
8576 GFP_KERNEL | __GFP_ZERO);
8580 for (i = 0; i < tp->irq_cnt; i++) {
8581 struct tg3_napi *tnapi = &tp->napi[i];
8582 struct tg3_hw_status *sblk;
8584 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8586 &tnapi->status_mapping,
8587 GFP_KERNEL | __GFP_ZERO);
8588 if (!tnapi->hw_status)
8591 sblk = tnapi->hw_status;
8593 if (tg3_flag(tp, ENABLE_RSS)) {
8594 u16 *prodptr = NULL;
8597 * When RSS is enabled, the status block format changes
8598 * slightly. The "rx_jumbo_consumer", "reserved",
8599 * and "rx_mini_consumer" members get mapped to the
8600 * other three rx return ring producer indexes.
8604 prodptr = &sblk->idx[0].rx_producer;
8607 prodptr = &sblk->rx_jumbo_consumer;
8610 prodptr = &sblk->reserved;
8613 prodptr = &sblk->rx_mini_consumer;
8616 tnapi->rx_rcb_prod_idx = prodptr;
8618 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8622 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8628 tg3_free_consistent(tp);
8632 #define MAX_WAIT_CNT 1000
8634 /* To stop a block, clear the enable bit and poll till it
8635 * clears. tp->lock is held.
8637 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8642 if (tg3_flag(tp, 5705_PLUS)) {
8649 /* We can't enable/disable these bits of the
8650 * 5705/5750, just say success.
8663 for (i = 0; i < MAX_WAIT_CNT; i++) {
8666 if ((val & enable_bit) == 0)
8670 if (i == MAX_WAIT_CNT && !silent) {
8671 dev_err(&tp->pdev->dev,
8672 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8680 /* tp->lock is held. */
8681 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8685 tg3_disable_ints(tp);
8687 tp->rx_mode &= ~RX_MODE_ENABLE;
8688 tw32_f(MAC_RX_MODE, tp->rx_mode);
8691 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8692 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8693 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8694 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8695 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8696 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8698 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8699 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8700 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8701 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8702 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8703 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8704 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8706 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8707 tw32_f(MAC_MODE, tp->mac_mode);
8710 tp->tx_mode &= ~TX_MODE_ENABLE;
8711 tw32_f(MAC_TX_MODE, tp->tx_mode);
8713 for (i = 0; i < MAX_WAIT_CNT; i++) {
8715 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8718 if (i >= MAX_WAIT_CNT) {
8719 dev_err(&tp->pdev->dev,
8720 "%s timed out, TX_MODE_ENABLE will not clear "
8721 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8725 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8726 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8727 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8729 tw32(FTQ_RESET, 0xffffffff);
8730 tw32(FTQ_RESET, 0x00000000);
8732 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8733 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8735 for (i = 0; i < tp->irq_cnt; i++) {
8736 struct tg3_napi *tnapi = &tp->napi[i];
8737 if (tnapi->hw_status)
8738 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8744 /* Save PCI command register before chip reset */
8745 static void tg3_save_pci_state(struct tg3 *tp)
8747 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8750 /* Restore PCI state after chip reset */
8751 static void tg3_restore_pci_state(struct tg3 *tp)
8755 /* Re-enable indirect register accesses. */
8756 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8757 tp->misc_host_ctrl);
8759 /* Set MAX PCI retry to zero. */
8760 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8761 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8762 tg3_flag(tp, PCIX_MODE))
8763 val |= PCISTATE_RETRY_SAME_DMA;
8764 /* Allow reads and writes to the APE register and memory space. */
8765 if (tg3_flag(tp, ENABLE_APE))
8766 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8767 PCISTATE_ALLOW_APE_SHMEM_WR |
8768 PCISTATE_ALLOW_APE_PSPACE_WR;
8769 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8771 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8773 if (!tg3_flag(tp, PCI_EXPRESS)) {
8774 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8775 tp->pci_cacheline_sz);
8776 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8780 /* Make sure PCI-X relaxed ordering bit is clear. */
8781 if (tg3_flag(tp, PCIX_MODE)) {
8784 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8786 pcix_cmd &= ~PCI_X_CMD_ERO;
8787 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8791 if (tg3_flag(tp, 5780_CLASS)) {
8793 /* Chip reset on 5780 will reset MSI enable bit,
8794 * so need to restore it.
8796 if (tg3_flag(tp, USING_MSI)) {
8799 pci_read_config_word(tp->pdev,
8800 tp->msi_cap + PCI_MSI_FLAGS,
8802 pci_write_config_word(tp->pdev,
8803 tp->msi_cap + PCI_MSI_FLAGS,
8804 ctrl | PCI_MSI_FLAGS_ENABLE);
8805 val = tr32(MSGINT_MODE);
8806 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8811 /* tp->lock is held. */
8812 static int tg3_chip_reset(struct tg3 *tp)
8815 void (*write_op)(struct tg3 *, u32, u32);
8820 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8822 /* No matching tg3_nvram_unlock() after this because
8823 * chip reset below will undo the nvram lock.
8825 tp->nvram_lock_cnt = 0;
8827 /* GRC_MISC_CFG core clock reset will clear the memory
8828 * enable bit in PCI register 4 and the MSI enable bit
8829 * on some chips, so we save relevant registers here.
8831 tg3_save_pci_state(tp);
8833 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8834 tg3_flag(tp, 5755_PLUS))
8835 tw32(GRC_FASTBOOT_PC, 0);
8838 * We must avoid the readl() that normally takes place.
8839 * It locks machines, causes machine checks, and other
8840 * fun things. So, temporarily disable the 5701
8841 * hardware workaround, while we do the reset.
8843 write_op = tp->write32;
8844 if (write_op == tg3_write_flush_reg32)
8845 tp->write32 = tg3_write32;
8847 /* Prevent the irq handler from reading or writing PCI registers
8848 * during chip reset when the memory enable bit in the PCI command
8849 * register may be cleared. The chip does not generate interrupt
8850 * at this time, but the irq handler may still be called due to irq
8851 * sharing or irqpoll.
8853 tg3_flag_set(tp, CHIP_RESETTING);
8854 for (i = 0; i < tp->irq_cnt; i++) {
8855 struct tg3_napi *tnapi = &tp->napi[i];
8856 if (tnapi->hw_status) {
8857 tnapi->hw_status->status = 0;
8858 tnapi->hw_status->status_tag = 0;
8860 tnapi->last_tag = 0;
8861 tnapi->last_irq_tag = 0;
8865 for (i = 0; i < tp->irq_cnt; i++)
8866 synchronize_irq(tp->napi[i].irq_vec);
8868 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8869 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8870 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8874 val = GRC_MISC_CFG_CORECLK_RESET;
8876 if (tg3_flag(tp, PCI_EXPRESS)) {
8877 /* Force PCIe 1.0a mode */
8878 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8879 !tg3_flag(tp, 57765_PLUS) &&
8880 tr32(TG3_PCIE_PHY_TSTCTL) ==
8881 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8882 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8884 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8885 tw32(GRC_MISC_CFG, (1 << 29));
8890 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8891 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8892 tw32(GRC_VCPU_EXT_CTRL,
8893 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8896 /* Manage gphy power for all CPMU absent PCIe devices. */
8897 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8898 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8900 tw32(GRC_MISC_CFG, val);
8902 /* restore 5701 hardware bug workaround write method */
8903 tp->write32 = write_op;
8905 /* Unfortunately, we have to delay before the PCI read back.
8906 * Some 575X chips even will not respond to a PCI cfg access
8907 * when the reset command is given to the chip.
8909 * How do these hardware designers expect things to work
8910 * properly if the PCI write is posted for a long period
8911 * of time? It is always necessary to have some method by
8912 * which a register read back can occur to push the write
8913 * out which does the reset.
8915 * For most tg3 variants the trick below was working.
8920 /* Flush PCI posted writes. The normal MMIO registers
8921 * are inaccessible at this time so this is the only
8922 * way to make this reliably (actually, this is no longer
8923 * the case, see above). I tried to use indirect
8924 * register read/write but this upset some 5701 variants.
8926 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8930 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8933 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8937 /* Wait for link training to complete. */
8938 for (j = 0; j < 5000; j++)
8941 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8942 pci_write_config_dword(tp->pdev, 0xc4,
8943 cfg_val | (1 << 15));
8946 /* Clear the "no snoop" and "relaxed ordering" bits. */
8947 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8949 * Older PCIe devices only support the 128 byte
8950 * MPS setting. Enforce the restriction.
8952 if (!tg3_flag(tp, CPMU_PRESENT))
8953 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8954 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8956 /* Clear error status */
8957 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8958 PCI_EXP_DEVSTA_CED |
8959 PCI_EXP_DEVSTA_NFED |
8960 PCI_EXP_DEVSTA_FED |
8961 PCI_EXP_DEVSTA_URD);
8964 tg3_restore_pci_state(tp);
8966 tg3_flag_clear(tp, CHIP_RESETTING);
8967 tg3_flag_clear(tp, ERROR_PROCESSED);
8970 if (tg3_flag(tp, 5780_CLASS))
8971 val = tr32(MEMARB_MODE);
8972 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8974 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8976 tw32(0x5000, 0x400);
8979 if (tg3_flag(tp, IS_SSB_CORE)) {
8981 * BCM4785: In order to avoid repercussions from using
8982 * potentially defective internal ROM, stop the Rx RISC CPU,
8983 * which is not required.
8986 tg3_halt_cpu(tp, RX_CPU_BASE);
8989 err = tg3_poll_fw(tp);
8993 tw32(GRC_MODE, tp->grc_mode);
8995 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8998 tw32(0xc4, val | (1 << 15));
9001 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9002 tg3_asic_rev(tp) == ASIC_REV_5705) {
9003 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9004 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9005 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9006 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9009 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9010 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9012 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9013 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9018 tw32_f(MAC_MODE, val);
9021 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9025 if (tg3_flag(tp, PCI_EXPRESS) &&
9026 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9027 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9028 !tg3_flag(tp, 57765_PLUS)) {
9031 tw32(0x7c00, val | (1 << 25));
9034 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9035 val = tr32(TG3_CPMU_CLCK_ORIDE);
9036 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9039 /* Reprobe ASF enable state. */
9040 tg3_flag_clear(tp, ENABLE_ASF);
9041 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9042 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9044 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9045 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9046 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9049 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9050 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9051 tg3_flag_set(tp, ENABLE_ASF);
9052 tp->last_event_jiffies = jiffies;
9053 if (tg3_flag(tp, 5750_PLUS))
9054 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9056 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9057 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9058 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9059 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9060 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9067 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9068 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9070 /* tp->lock is held. */
9071 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9077 tg3_write_sig_pre_reset(tp, kind);
9079 tg3_abort_hw(tp, silent);
9080 err = tg3_chip_reset(tp);
9082 __tg3_set_mac_addr(tp, false);
9084 tg3_write_sig_legacy(tp, kind);
9085 tg3_write_sig_post_reset(tp, kind);
9088 /* Save the stats across chip resets... */
9089 tg3_get_nstats(tp, &tp->net_stats_prev);
9090 tg3_get_estats(tp, &tp->estats_prev);
9092 /* And make sure the next sample is new data */
9093 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9102 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9104 struct tg3 *tp = netdev_priv(dev);
9105 struct sockaddr *addr = p;
9107 bool skip_mac_1 = false;
9109 if (!is_valid_ether_addr(addr->sa_data))
9110 return -EADDRNOTAVAIL;
9112 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9114 if (!netif_running(dev))
9117 if (tg3_flag(tp, ENABLE_ASF)) {
9118 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9120 addr0_high = tr32(MAC_ADDR_0_HIGH);
9121 addr0_low = tr32(MAC_ADDR_0_LOW);
9122 addr1_high = tr32(MAC_ADDR_1_HIGH);
9123 addr1_low = tr32(MAC_ADDR_1_LOW);
9125 /* Skip MAC addr 1 if ASF is using it. */
9126 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9127 !(addr1_high == 0 && addr1_low == 0))
9130 spin_lock_bh(&tp->lock);
9131 __tg3_set_mac_addr(tp, skip_mac_1);
9132 spin_unlock_bh(&tp->lock);
9137 /* tp->lock is held. */
9138 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9139 dma_addr_t mapping, u32 maxlen_flags,
9143 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9144 ((u64) mapping >> 32));
9146 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9147 ((u64) mapping & 0xffffffff));
9149 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9152 if (!tg3_flag(tp, 5705_PLUS))
9154 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9159 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9163 if (!tg3_flag(tp, ENABLE_TSS)) {
9164 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9165 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9166 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9168 tw32(HOSTCC_TXCOL_TICKS, 0);
9169 tw32(HOSTCC_TXMAX_FRAMES, 0);
9170 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9172 for (; i < tp->txq_cnt; i++) {
9175 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9176 tw32(reg, ec->tx_coalesce_usecs);
9177 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9178 tw32(reg, ec->tx_max_coalesced_frames);
9179 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9180 tw32(reg, ec->tx_max_coalesced_frames_irq);
9184 for (; i < tp->irq_max - 1; i++) {
9185 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9186 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9187 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9191 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9194 u32 limit = tp->rxq_cnt;
9196 if (!tg3_flag(tp, ENABLE_RSS)) {
9197 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9198 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9199 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9202 tw32(HOSTCC_RXCOL_TICKS, 0);
9203 tw32(HOSTCC_RXMAX_FRAMES, 0);
9204 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9207 for (; i < limit; i++) {
9210 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9211 tw32(reg, ec->rx_coalesce_usecs);
9212 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9213 tw32(reg, ec->rx_max_coalesced_frames);
9214 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9215 tw32(reg, ec->rx_max_coalesced_frames_irq);
9218 for (; i < tp->irq_max - 1; i++) {
9219 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9220 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9221 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9225 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9227 tg3_coal_tx_init(tp, ec);
9228 tg3_coal_rx_init(tp, ec);
9230 if (!tg3_flag(tp, 5705_PLUS)) {
9231 u32 val = ec->stats_block_coalesce_usecs;
9233 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9234 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9239 tw32(HOSTCC_STAT_COAL_TICKS, val);
9243 /* tp->lock is held. */
9244 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9248 /* Disable all transmit rings but the first. */
9249 if (!tg3_flag(tp, 5705_PLUS))
9250 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9251 else if (tg3_flag(tp, 5717_PLUS))
9252 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9253 else if (tg3_flag(tp, 57765_CLASS) ||
9254 tg3_asic_rev(tp) == ASIC_REV_5762)
9255 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9257 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9259 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9260 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9261 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9262 BDINFO_FLAGS_DISABLED);
9265 /* tp->lock is held. */
9266 static void tg3_tx_rcbs_init(struct tg3 *tp)
9269 u32 txrcb = NIC_SRAM_SEND_RCB;
9271 if (tg3_flag(tp, ENABLE_TSS))
9274 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9275 struct tg3_napi *tnapi = &tp->napi[i];
9277 if (!tnapi->tx_ring)
9280 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9281 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9282 NIC_SRAM_TX_BUFFER_DESC);
9286 /* tp->lock is held. */
9287 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9291 /* Disable all receive return rings but the first. */
9292 if (tg3_flag(tp, 5717_PLUS))
9293 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9294 else if (!tg3_flag(tp, 5705_PLUS))
9295 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9296 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9297 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9298 tg3_flag(tp, 57765_CLASS))
9299 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9301 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9303 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9304 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9305 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9306 BDINFO_FLAGS_DISABLED);
9309 /* tp->lock is held. */
9310 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9313 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9315 if (tg3_flag(tp, ENABLE_RSS))
9318 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9319 struct tg3_napi *tnapi = &tp->napi[i];
9324 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9325 (tp->rx_ret_ring_mask + 1) <<
9326 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9330 /* tp->lock is held. */
9331 static void tg3_rings_reset(struct tg3 *tp)
9335 struct tg3_napi *tnapi = &tp->napi[0];
9337 tg3_tx_rcbs_disable(tp);
9339 tg3_rx_ret_rcbs_disable(tp);
9341 /* Disable interrupts */
9342 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9343 tp->napi[0].chk_msi_cnt = 0;
9344 tp->napi[0].last_rx_cons = 0;
9345 tp->napi[0].last_tx_cons = 0;
9347 /* Zero mailbox registers. */
9348 if (tg3_flag(tp, SUPPORT_MSIX)) {
9349 for (i = 1; i < tp->irq_max; i++) {
9350 tp->napi[i].tx_prod = 0;
9351 tp->napi[i].tx_cons = 0;
9352 if (tg3_flag(tp, ENABLE_TSS))
9353 tw32_mailbox(tp->napi[i].prodmbox, 0);
9354 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9355 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9356 tp->napi[i].chk_msi_cnt = 0;
9357 tp->napi[i].last_rx_cons = 0;
9358 tp->napi[i].last_tx_cons = 0;
9360 if (!tg3_flag(tp, ENABLE_TSS))
9361 tw32_mailbox(tp->napi[0].prodmbox, 0);
9363 tp->napi[0].tx_prod = 0;
9364 tp->napi[0].tx_cons = 0;
9365 tw32_mailbox(tp->napi[0].prodmbox, 0);
9366 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9369 /* Make sure the NIC-based send BD rings are disabled. */
9370 if (!tg3_flag(tp, 5705_PLUS)) {
9371 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9372 for (i = 0; i < 16; i++)
9373 tw32_tx_mbox(mbox + i * 8, 0);
9376 /* Clear status block in ram. */
9377 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9379 /* Set status block DMA address */
9380 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9381 ((u64) tnapi->status_mapping >> 32));
9382 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9383 ((u64) tnapi->status_mapping & 0xffffffff));
9385 stblk = HOSTCC_STATBLCK_RING1;
9387 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9388 u64 mapping = (u64)tnapi->status_mapping;
9389 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9390 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9393 /* Clear status block in ram. */
9394 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9397 tg3_tx_rcbs_init(tp);
9398 tg3_rx_ret_rcbs_init(tp);
9401 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9403 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9405 if (!tg3_flag(tp, 5750_PLUS) ||
9406 tg3_flag(tp, 5780_CLASS) ||
9407 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9408 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9409 tg3_flag(tp, 57765_PLUS))
9410 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9411 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9412 tg3_asic_rev(tp) == ASIC_REV_5787)
9413 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9415 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9417 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9418 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9420 val = min(nic_rep_thresh, host_rep_thresh);
9421 tw32(RCVBDI_STD_THRESH, val);
9423 if (tg3_flag(tp, 57765_PLUS))
9424 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9426 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9429 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9431 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9433 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9434 tw32(RCVBDI_JUMBO_THRESH, val);
9436 if (tg3_flag(tp, 57765_PLUS))
9437 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9440 static inline u32 calc_crc(unsigned char *buf, int len)
9448 for (j = 0; j < len; j++) {
9451 for (k = 0; k < 8; k++) {
9464 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9466 /* accept or reject all multicast frames */
9467 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9468 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9469 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9470 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9473 static void __tg3_set_rx_mode(struct net_device *dev)
9475 struct tg3 *tp = netdev_priv(dev);
9478 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9479 RX_MODE_KEEP_VLAN_TAG);
9481 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9482 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9485 if (!tg3_flag(tp, ENABLE_ASF))
9486 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9489 if (dev->flags & IFF_PROMISC) {
9490 /* Promiscuous mode. */
9491 rx_mode |= RX_MODE_PROMISC;
9492 } else if (dev->flags & IFF_ALLMULTI) {
9493 /* Accept all multicast. */
9494 tg3_set_multi(tp, 1);
9495 } else if (netdev_mc_empty(dev)) {
9496 /* Reject all multicast. */
9497 tg3_set_multi(tp, 0);
9499 /* Accept one or more multicast(s). */
9500 struct netdev_hw_addr *ha;
9501 u32 mc_filter[4] = { 0, };
9506 netdev_for_each_mc_addr(ha, dev) {
9507 crc = calc_crc(ha->addr, ETH_ALEN);
9509 regidx = (bit & 0x60) >> 5;
9511 mc_filter[regidx] |= (1 << bit);
9514 tw32(MAC_HASH_REG_0, mc_filter[0]);
9515 tw32(MAC_HASH_REG_1, mc_filter[1]);
9516 tw32(MAC_HASH_REG_2, mc_filter[2]);
9517 tw32(MAC_HASH_REG_3, mc_filter[3]);
9520 if (rx_mode != tp->rx_mode) {
9521 tp->rx_mode = rx_mode;
9522 tw32_f(MAC_RX_MODE, rx_mode);
9527 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9531 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9532 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9535 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9539 if (!tg3_flag(tp, SUPPORT_MSIX))
9542 if (tp->rxq_cnt == 1) {
9543 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9547 /* Validate table against current IRQ count */
9548 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9549 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9553 if (i != TG3_RSS_INDIR_TBL_SIZE)
9554 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9557 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9560 u32 reg = MAC_RSS_INDIR_TBL_0;
9562 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9563 u32 val = tp->rss_ind_tbl[i];
9565 for (; i % 8; i++) {
9567 val |= tp->rss_ind_tbl[i];
9574 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9576 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9577 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9579 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9582 /* tp->lock is held. */
9583 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9585 u32 val, rdmac_mode;
9587 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9589 tg3_disable_ints(tp);
9593 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9595 if (tg3_flag(tp, INIT_COMPLETE))
9596 tg3_abort_hw(tp, 1);
9598 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9599 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9600 tg3_phy_pull_config(tp);
9601 tg3_eee_pull_config(tp, NULL);
9602 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9605 /* Enable MAC control of LPI */
9606 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9612 err = tg3_chip_reset(tp);
9616 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9618 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9619 val = tr32(TG3_CPMU_CTRL);
9620 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9621 tw32(TG3_CPMU_CTRL, val);
9623 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9624 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9625 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9626 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9628 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9629 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9630 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9631 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9633 val = tr32(TG3_CPMU_HST_ACC);
9634 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9635 val |= CPMU_HST_ACC_MACCLK_6_25;
9636 tw32(TG3_CPMU_HST_ACC, val);
9639 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9640 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9641 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9642 PCIE_PWR_MGMT_L1_THRESH_4MS;
9643 tw32(PCIE_PWR_MGMT_THRESH, val);
9645 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9646 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9648 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9650 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9651 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9654 if (tg3_flag(tp, L1PLLPD_EN)) {
9655 u32 grc_mode = tr32(GRC_MODE);
9657 /* Access the lower 1K of PL PCIE block registers. */
9658 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9659 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9661 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9662 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9663 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9665 tw32(GRC_MODE, grc_mode);
9668 if (tg3_flag(tp, 57765_CLASS)) {
9669 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9670 u32 grc_mode = tr32(GRC_MODE);
9672 /* Access the lower 1K of PL PCIE block registers. */
9673 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9674 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9676 val = tr32(TG3_PCIE_TLDLPL_PORT +
9677 TG3_PCIE_PL_LO_PHYCTL5);
9678 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9679 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9681 tw32(GRC_MODE, grc_mode);
9684 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9687 /* Fix transmit hangs */
9688 val = tr32(TG3_CPMU_PADRNG_CTL);
9689 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9690 tw32(TG3_CPMU_PADRNG_CTL, val);
9692 grc_mode = tr32(GRC_MODE);
9694 /* Access the lower 1K of DL PCIE block registers. */
9695 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9696 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9698 val = tr32(TG3_PCIE_TLDLPL_PORT +
9699 TG3_PCIE_DL_LO_FTSMAX);
9700 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9701 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9702 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9704 tw32(GRC_MODE, grc_mode);
9707 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9708 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9709 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9710 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9713 /* This works around an issue with Athlon chipsets on
9714 * B3 tigon3 silicon. This bit has no effect on any
9715 * other revision. But do not set this on PCI Express
9716 * chips and don't even touch the clocks if the CPMU is present.
9718 if (!tg3_flag(tp, CPMU_PRESENT)) {
9719 if (!tg3_flag(tp, PCI_EXPRESS))
9720 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9721 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9724 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9725 tg3_flag(tp, PCIX_MODE)) {
9726 val = tr32(TG3PCI_PCISTATE);
9727 val |= PCISTATE_RETRY_SAME_DMA;
9728 tw32(TG3PCI_PCISTATE, val);
9731 if (tg3_flag(tp, ENABLE_APE)) {
9732 /* Allow reads and writes to the
9733 * APE register and memory space.
9735 val = tr32(TG3PCI_PCISTATE);
9736 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9737 PCISTATE_ALLOW_APE_SHMEM_WR |
9738 PCISTATE_ALLOW_APE_PSPACE_WR;
9739 tw32(TG3PCI_PCISTATE, val);
9742 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9743 /* Enable some hw fixes. */
9744 val = tr32(TG3PCI_MSI_DATA);
9745 val |= (1 << 26) | (1 << 28) | (1 << 29);
9746 tw32(TG3PCI_MSI_DATA, val);
9749 /* Descriptor ring init may make accesses to the
9750 * NIC SRAM area to setup the TX descriptors, so we
9751 * can only do this after the hardware has been
9752 * successfully reset.
9754 err = tg3_init_rings(tp);
9758 if (tg3_flag(tp, 57765_PLUS)) {
9759 val = tr32(TG3PCI_DMA_RW_CTRL) &
9760 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9761 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9762 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9763 if (!tg3_flag(tp, 57765_CLASS) &&
9764 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9765 tg3_asic_rev(tp) != ASIC_REV_5762)
9766 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9767 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9768 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9769 tg3_asic_rev(tp) != ASIC_REV_5761) {
9770 /* This value is determined during the probe time DMA
9771 * engine test, tg3_test_dma.
9773 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9776 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9777 GRC_MODE_4X_NIC_SEND_RINGS |
9778 GRC_MODE_NO_TX_PHDR_CSUM |
9779 GRC_MODE_NO_RX_PHDR_CSUM);
9780 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9782 /* Pseudo-header checksum is done by hardware logic and not
9783 * the offload processers, so make the chip do the pseudo-
9784 * header checksums on receive. For transmit it is more
9785 * convenient to do the pseudo-header checksum in software
9786 * as Linux does that on transmit for us in all cases.
9788 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9790 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9792 tw32(TG3_RX_PTP_CTL,
9793 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9795 if (tg3_flag(tp, PTP_CAPABLE))
9796 val |= GRC_MODE_TIME_SYNC_ENABLE;
9798 tw32(GRC_MODE, tp->grc_mode | val);
9800 /* Setup the timer prescalar register. Clock is always 66Mhz. */
9801 val = tr32(GRC_MISC_CFG);
9803 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9804 tw32(GRC_MISC_CFG, val);
9806 /* Initialize MBUF/DESC pool. */
9807 if (tg3_flag(tp, 5750_PLUS)) {
9809 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9810 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9811 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9812 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9814 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9815 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9816 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9817 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9820 fw_len = tp->fw_len;
9821 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9822 tw32(BUFMGR_MB_POOL_ADDR,
9823 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9824 tw32(BUFMGR_MB_POOL_SIZE,
9825 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9828 if (tp->dev->mtu <= ETH_DATA_LEN) {
9829 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9830 tp->bufmgr_config.mbuf_read_dma_low_water);
9831 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9832 tp->bufmgr_config.mbuf_mac_rx_low_water);
9833 tw32(BUFMGR_MB_HIGH_WATER,
9834 tp->bufmgr_config.mbuf_high_water);
9836 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9837 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9838 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9839 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9840 tw32(BUFMGR_MB_HIGH_WATER,
9841 tp->bufmgr_config.mbuf_high_water_jumbo);
9843 tw32(BUFMGR_DMA_LOW_WATER,
9844 tp->bufmgr_config.dma_low_water);
9845 tw32(BUFMGR_DMA_HIGH_WATER,
9846 tp->bufmgr_config.dma_high_water);
9848 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9849 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9850 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9851 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9852 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9853 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9854 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9855 tw32(BUFMGR_MODE, val);
9856 for (i = 0; i < 2000; i++) {
9857 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9862 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9866 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9867 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9869 tg3_setup_rxbd_thresholds(tp);
9871 /* Initialize TG3_BDINFO's at:
9872 * RCVDBDI_STD_BD: standard eth size rx ring
9873 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9874 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9877 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9878 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9879 * ring attribute flags
9880 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9882 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9883 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9885 * The size of each ring is fixed in the firmware, but the location is
9888 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9889 ((u64) tpr->rx_std_mapping >> 32));
9890 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9891 ((u64) tpr->rx_std_mapping & 0xffffffff));
9892 if (!tg3_flag(tp, 5717_PLUS))
9893 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9894 NIC_SRAM_RX_BUFFER_DESC);
9896 /* Disable the mini ring */
9897 if (!tg3_flag(tp, 5705_PLUS))
9898 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9899 BDINFO_FLAGS_DISABLED);
9901 /* Program the jumbo buffer descriptor ring control
9902 * blocks on those devices that have them.
9904 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9905 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9907 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9908 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9909 ((u64) tpr->rx_jmb_mapping >> 32));
9910 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9911 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9912 val = TG3_RX_JMB_RING_SIZE(tp) <<
9913 BDINFO_FLAGS_MAXLEN_SHIFT;
9914 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9915 val | BDINFO_FLAGS_USE_EXT_RECV);
9916 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9917 tg3_flag(tp, 57765_CLASS) ||
9918 tg3_asic_rev(tp) == ASIC_REV_5762)
9919 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9920 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9922 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9923 BDINFO_FLAGS_DISABLED);
9926 if (tg3_flag(tp, 57765_PLUS)) {
9927 val = TG3_RX_STD_RING_SIZE(tp);
9928 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9929 val |= (TG3_RX_STD_DMA_SZ << 2);
9931 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9933 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9935 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9937 tpr->rx_std_prod_idx = tp->rx_pending;
9938 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9940 tpr->rx_jmb_prod_idx =
9941 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9942 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9944 tg3_rings_reset(tp);
9946 /* Initialize MAC address and backoff seed. */
9947 __tg3_set_mac_addr(tp, false);
9949 /* MTU + ethernet header + FCS + optional VLAN tag */
9950 tw32(MAC_RX_MTU_SIZE,
9951 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9953 /* The slot time is changed by tg3_setup_phy if we
9954 * run at gigabit with half duplex.
9956 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9957 (6 << TX_LENGTHS_IPG_SHIFT) |
9958 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9960 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9961 tg3_asic_rev(tp) == ASIC_REV_5762)
9962 val |= tr32(MAC_TX_LENGTHS) &
9963 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9964 TX_LENGTHS_CNT_DWN_VAL_MSK);
9966 tw32(MAC_TX_LENGTHS, val);
9968 /* Receive rules. */
9969 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9970 tw32(RCVLPC_CONFIG, 0x0181);
9972 /* Calculate RDMAC_MODE setting early, we need it to determine
9973 * the RCVLPC_STATE_ENABLE mask.
9975 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9976 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9977 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9978 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9979 RDMAC_MODE_LNGREAD_ENAB);
9981 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9982 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9984 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9985 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9986 tg3_asic_rev(tp) == ASIC_REV_57780)
9987 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9988 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9989 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9991 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9992 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9993 if (tg3_flag(tp, TSO_CAPABLE) &&
9994 tg3_asic_rev(tp) == ASIC_REV_5705) {
9995 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9996 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9997 !tg3_flag(tp, IS_5788)) {
9998 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10002 if (tg3_flag(tp, PCI_EXPRESS))
10003 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10005 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10007 if (tp->dev->mtu <= ETH_DATA_LEN) {
10008 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10009 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10013 if (tg3_flag(tp, HW_TSO_1) ||
10014 tg3_flag(tp, HW_TSO_2) ||
10015 tg3_flag(tp, HW_TSO_3))
10016 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10018 if (tg3_flag(tp, 57765_PLUS) ||
10019 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10020 tg3_asic_rev(tp) == ASIC_REV_57780)
10021 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10023 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10024 tg3_asic_rev(tp) == ASIC_REV_5762)
10025 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10027 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10028 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10029 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10030 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10031 tg3_flag(tp, 57765_PLUS)) {
10034 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10035 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10037 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10039 val = tr32(tgtreg);
10040 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10041 tg3_asic_rev(tp) == ASIC_REV_5762) {
10042 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10043 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10044 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10045 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10046 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10047 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10049 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10052 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10053 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10054 tg3_asic_rev(tp) == ASIC_REV_5762) {
10057 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10058 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10060 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10062 val = tr32(tgtreg);
10064 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10065 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10068 /* Receive/send statistics. */
10069 if (tg3_flag(tp, 5750_PLUS)) {
10070 val = tr32(RCVLPC_STATS_ENABLE);
10071 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10072 tw32(RCVLPC_STATS_ENABLE, val);
10073 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10074 tg3_flag(tp, TSO_CAPABLE)) {
10075 val = tr32(RCVLPC_STATS_ENABLE);
10076 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10077 tw32(RCVLPC_STATS_ENABLE, val);
10079 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10081 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10082 tw32(SNDDATAI_STATSENAB, 0xffffff);
10083 tw32(SNDDATAI_STATSCTRL,
10084 (SNDDATAI_SCTRL_ENABLE |
10085 SNDDATAI_SCTRL_FASTUPD));
10087 /* Setup host coalescing engine. */
10088 tw32(HOSTCC_MODE, 0);
10089 for (i = 0; i < 2000; i++) {
10090 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10095 __tg3_set_coalesce(tp, &tp->coal);
10097 if (!tg3_flag(tp, 5705_PLUS)) {
10098 /* Status/statistics block address. See tg3_timer,
10099 * the tg3_periodic_fetch_stats call there, and
10100 * tg3_get_stats to see how this works for 5705/5750 chips.
10102 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10103 ((u64) tp->stats_mapping >> 32));
10104 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10105 ((u64) tp->stats_mapping & 0xffffffff));
10106 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10108 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10110 /* Clear statistics and status block memory areas */
10111 for (i = NIC_SRAM_STATS_BLK;
10112 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10113 i += sizeof(u32)) {
10114 tg3_write_mem(tp, i, 0);
10119 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10121 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10122 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10123 if (!tg3_flag(tp, 5705_PLUS))
10124 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10126 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10127 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10128 /* reset to prevent losing 1st rx packet intermittently */
10129 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10133 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10134 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10135 MAC_MODE_FHDE_ENABLE;
10136 if (tg3_flag(tp, ENABLE_APE))
10137 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10138 if (!tg3_flag(tp, 5705_PLUS) &&
10139 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10140 tg3_asic_rev(tp) != ASIC_REV_5700)
10141 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10142 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10145 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10146 * If TG3_FLAG_IS_NIC is zero, we should read the
10147 * register to preserve the GPIO settings for LOMs. The GPIOs,
10148 * whether used as inputs or outputs, are set by boot code after
10151 if (!tg3_flag(tp, IS_NIC)) {
10154 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10155 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10156 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10158 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10159 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10160 GRC_LCLCTRL_GPIO_OUTPUT3;
10162 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10163 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10165 tp->grc_local_ctrl &= ~gpio_mask;
10166 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10168 /* GPIO1 must be driven high for eeprom write protect */
10169 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10170 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10171 GRC_LCLCTRL_GPIO_OUTPUT1);
10173 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10176 if (tg3_flag(tp, USING_MSIX)) {
10177 val = tr32(MSGINT_MODE);
10178 val |= MSGINT_MODE_ENABLE;
10179 if (tp->irq_cnt > 1)
10180 val |= MSGINT_MODE_MULTIVEC_EN;
10181 if (!tg3_flag(tp, 1SHOT_MSI))
10182 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10183 tw32(MSGINT_MODE, val);
10186 if (!tg3_flag(tp, 5705_PLUS)) {
10187 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10191 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10192 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10193 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10194 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10195 WDMAC_MODE_LNGREAD_ENAB);
10197 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10198 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10199 if (tg3_flag(tp, TSO_CAPABLE) &&
10200 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10201 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10203 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10204 !tg3_flag(tp, IS_5788)) {
10205 val |= WDMAC_MODE_RX_ACCEL;
10209 /* Enable host coalescing bug fix */
10210 if (tg3_flag(tp, 5755_PLUS))
10211 val |= WDMAC_MODE_STATUS_TAG_FIX;
10213 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10214 val |= WDMAC_MODE_BURST_ALL_DATA;
10216 tw32_f(WDMAC_MODE, val);
10219 if (tg3_flag(tp, PCIX_MODE)) {
10222 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10224 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10225 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10226 pcix_cmd |= PCI_X_CMD_READ_2K;
10227 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10228 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10229 pcix_cmd |= PCI_X_CMD_READ_2K;
10231 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10235 tw32_f(RDMAC_MODE, rdmac_mode);
10238 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10239 tg3_asic_rev(tp) == ASIC_REV_5720) {
10240 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10241 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10244 if (i < TG3_NUM_RDMA_CHANNELS) {
10245 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10246 val |= tg3_lso_rd_dma_workaround_bit(tp);
10247 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10248 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10252 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10253 if (!tg3_flag(tp, 5705_PLUS))
10254 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10256 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10257 tw32(SNDDATAC_MODE,
10258 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10260 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10262 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10263 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10264 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10265 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10266 val |= RCVDBDI_MODE_LRG_RING_SZ;
10267 tw32(RCVDBDI_MODE, val);
10268 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10269 if (tg3_flag(tp, HW_TSO_1) ||
10270 tg3_flag(tp, HW_TSO_2) ||
10271 tg3_flag(tp, HW_TSO_3))
10272 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10273 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10274 if (tg3_flag(tp, ENABLE_TSS))
10275 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10276 tw32(SNDBDI_MODE, val);
10277 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10279 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10280 err = tg3_load_5701_a0_firmware_fix(tp);
10285 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10286 /* Ignore any errors for the firmware download. If download
10287 * fails, the device will operate with EEE disabled
10289 tg3_load_57766_firmware(tp);
10292 if (tg3_flag(tp, TSO_CAPABLE)) {
10293 err = tg3_load_tso_firmware(tp);
10298 tp->tx_mode = TX_MODE_ENABLE;
10300 if (tg3_flag(tp, 5755_PLUS) ||
10301 tg3_asic_rev(tp) == ASIC_REV_5906)
10302 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10304 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10305 tg3_asic_rev(tp) == ASIC_REV_5762) {
10306 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10307 tp->tx_mode &= ~val;
10308 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10311 tw32_f(MAC_TX_MODE, tp->tx_mode);
10314 if (tg3_flag(tp, ENABLE_RSS)) {
10315 tg3_rss_write_indir_tbl(tp);
10317 /* Setup the "secret" hash key. */
10318 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10319 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10320 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10321 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10322 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10323 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10324 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10325 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10326 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10327 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10330 tp->rx_mode = RX_MODE_ENABLE;
10331 if (tg3_flag(tp, 5755_PLUS))
10332 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10334 if (tg3_flag(tp, ENABLE_RSS))
10335 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10336 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10337 RX_MODE_RSS_IPV6_HASH_EN |
10338 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10339 RX_MODE_RSS_IPV4_HASH_EN |
10340 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10342 tw32_f(MAC_RX_MODE, tp->rx_mode);
10345 tw32(MAC_LED_CTRL, tp->led_ctrl);
10347 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10348 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10349 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10352 tw32_f(MAC_RX_MODE, tp->rx_mode);
10355 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10356 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10357 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10358 /* Set drive transmission level to 1.2V */
10359 /* only if the signal pre-emphasis bit is not set */
10360 val = tr32(MAC_SERDES_CFG);
10363 tw32(MAC_SERDES_CFG, val);
10365 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10366 tw32(MAC_SERDES_CFG, 0x616000);
10369 /* Prevent chip from dropping frames when flow control
10372 if (tg3_flag(tp, 57765_CLASS))
10376 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10378 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10379 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10380 /* Use hardware link auto-negotiation */
10381 tg3_flag_set(tp, HW_AUTONEG);
10384 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10385 tg3_asic_rev(tp) == ASIC_REV_5714) {
10388 tmp = tr32(SERDES_RX_CTRL);
10389 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10390 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10391 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10392 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10395 if (!tg3_flag(tp, USE_PHYLIB)) {
10396 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10397 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10399 err = tg3_setup_phy(tp, false);
10403 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10404 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10407 /* Clear CRC stats. */
10408 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10409 tg3_writephy(tp, MII_TG3_TEST1,
10410 tmp | MII_TG3_TEST1_CRC_EN);
10411 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10416 __tg3_set_rx_mode(tp->dev);
10418 /* Initialize receive rules. */
10419 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10420 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10421 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10422 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10424 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10428 if (tg3_flag(tp, ENABLE_ASF))
10432 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10434 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10436 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10438 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10440 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10442 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10444 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10446 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10448 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10450 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10452 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10454 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10456 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10458 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10466 if (tg3_flag(tp, ENABLE_APE))
10467 /* Write our heartbeat update interval to APE. */
10468 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10469 APE_HOST_HEARTBEAT_INT_DISABLE);
10471 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10476 /* Called at device open time to get the chip ready for
10477 * packet processing. Invoked with tp->lock held.
10479 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10481 /* Chip may have been just powered on. If so, the boot code may still
10482 * be running initialization. Wait for it to finish to avoid races in
10483 * accessing the hardware.
10485 tg3_enable_register_access(tp);
10488 tg3_switch_clocks(tp);
10490 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10492 return tg3_reset_hw(tp, reset_phy);
10495 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10499 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10500 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10502 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10505 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10506 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10507 memset(ocir, 0, TG3_OCIR_LEN);
10511 /* sysfs attributes for hwmon */
10512 static ssize_t tg3_show_temp(struct device *dev,
10513 struct device_attribute *devattr, char *buf)
10515 struct pci_dev *pdev = to_pci_dev(dev);
10516 struct net_device *netdev = pci_get_drvdata(pdev);
10517 struct tg3 *tp = netdev_priv(netdev);
10518 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10521 spin_lock_bh(&tp->lock);
10522 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10523 sizeof(temperature));
10524 spin_unlock_bh(&tp->lock);
10525 return sprintf(buf, "%u\n", temperature);
10529 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10530 TG3_TEMP_SENSOR_OFFSET);
10531 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10532 TG3_TEMP_CAUTION_OFFSET);
10533 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10534 TG3_TEMP_MAX_OFFSET);
10536 static struct attribute *tg3_attributes[] = {
10537 &sensor_dev_attr_temp1_input.dev_attr.attr,
10538 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10539 &sensor_dev_attr_temp1_max.dev_attr.attr,
10543 static const struct attribute_group tg3_group = {
10544 .attrs = tg3_attributes,
10547 static void tg3_hwmon_close(struct tg3 *tp)
10549 if (tp->hwmon_dev) {
10550 hwmon_device_unregister(tp->hwmon_dev);
10551 tp->hwmon_dev = NULL;
10552 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10556 static void tg3_hwmon_open(struct tg3 *tp)
10560 struct pci_dev *pdev = tp->pdev;
10561 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10563 tg3_sd_scan_scratchpad(tp, ocirs);
10565 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10566 if (!ocirs[i].src_data_length)
10569 size += ocirs[i].src_hdr_length;
10570 size += ocirs[i].src_data_length;
10576 /* Register hwmon sysfs hooks */
10577 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10579 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10583 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10584 if (IS_ERR(tp->hwmon_dev)) {
10585 tp->hwmon_dev = NULL;
10586 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10587 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10592 #define TG3_STAT_ADD32(PSTAT, REG) \
10593 do { u32 __val = tr32(REG); \
10594 (PSTAT)->low += __val; \
10595 if ((PSTAT)->low < __val) \
10596 (PSTAT)->high += 1; \
10599 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10601 struct tg3_hw_stats *sp = tp->hw_stats;
10606 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10607 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10608 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10609 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10610 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10611 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10612 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10613 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10614 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10615 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10616 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10617 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10618 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10619 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10620 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10621 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10624 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10625 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10626 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10627 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10630 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10631 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10632 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10633 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10634 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10635 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10636 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10637 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10638 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10639 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10640 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10641 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10642 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10643 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10645 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10646 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10647 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10648 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10649 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10651 u32 val = tr32(HOSTCC_FLOW_ATTN);
10652 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10654 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10655 sp->rx_discards.low += val;
10656 if (sp->rx_discards.low < val)
10657 sp->rx_discards.high += 1;
10659 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10661 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10664 static void tg3_chk_missed_msi(struct tg3 *tp)
10668 for (i = 0; i < tp->irq_cnt; i++) {
10669 struct tg3_napi *tnapi = &tp->napi[i];
10671 if (tg3_has_work(tnapi)) {
10672 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10673 tnapi->last_tx_cons == tnapi->tx_cons) {
10674 if (tnapi->chk_msi_cnt < 1) {
10675 tnapi->chk_msi_cnt++;
10681 tnapi->chk_msi_cnt = 0;
10682 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10683 tnapi->last_tx_cons = tnapi->tx_cons;
10687 static void tg3_timer(unsigned long __opaque)
10689 struct tg3 *tp = (struct tg3 *) __opaque;
10691 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10692 goto restart_timer;
10694 spin_lock(&tp->lock);
10696 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10697 tg3_flag(tp, 57765_CLASS))
10698 tg3_chk_missed_msi(tp);
10700 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10701 /* BCM4785: Flush posted writes from GbE to host memory. */
10705 if (!tg3_flag(tp, TAGGED_STATUS)) {
10706 /* All of this garbage is because when using non-tagged
10707 * IRQ status the mailbox/status_block protocol the chip
10708 * uses with the cpu is race prone.
10710 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10711 tw32(GRC_LOCAL_CTRL,
10712 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10714 tw32(HOSTCC_MODE, tp->coalesce_mode |
10715 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10718 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10719 spin_unlock(&tp->lock);
10720 tg3_reset_task_schedule(tp);
10721 goto restart_timer;
10725 /* This part only runs once per second. */
10726 if (!--tp->timer_counter) {
10727 if (tg3_flag(tp, 5705_PLUS))
10728 tg3_periodic_fetch_stats(tp);
10730 if (tp->setlpicnt && !--tp->setlpicnt)
10731 tg3_phy_eee_enable(tp);
10733 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10737 mac_stat = tr32(MAC_STATUS);
10740 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10741 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10743 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10747 tg3_setup_phy(tp, false);
10748 } else if (tg3_flag(tp, POLL_SERDES)) {
10749 u32 mac_stat = tr32(MAC_STATUS);
10750 int need_setup = 0;
10753 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10756 if (!tp->link_up &&
10757 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10758 MAC_STATUS_SIGNAL_DET))) {
10762 if (!tp->serdes_counter) {
10765 ~MAC_MODE_PORT_MODE_MASK));
10767 tw32_f(MAC_MODE, tp->mac_mode);
10770 tg3_setup_phy(tp, false);
10772 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10773 tg3_flag(tp, 5780_CLASS)) {
10774 tg3_serdes_parallel_detect(tp);
10777 tp->timer_counter = tp->timer_multiplier;
10780 /* Heartbeat is only sent once every 2 seconds.
10782 * The heartbeat is to tell the ASF firmware that the host
10783 * driver is still alive. In the event that the OS crashes,
10784 * ASF needs to reset the hardware to free up the FIFO space
10785 * that may be filled with rx packets destined for the host.
10786 * If the FIFO is full, ASF will no longer function properly.
10788 * Unintended resets have been reported on real time kernels
10789 * where the timer doesn't run on time. Netpoll will also have
10792 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10793 * to check the ring condition when the heartbeat is expiring
10794 * before doing the reset. This will prevent most unintended
10797 if (!--tp->asf_counter) {
10798 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10799 tg3_wait_for_event_ack(tp);
10801 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10802 FWCMD_NICDRV_ALIVE3);
10803 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10804 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10805 TG3_FW_UPDATE_TIMEOUT_SEC);
10807 tg3_generate_fw_event(tp);
10809 tp->asf_counter = tp->asf_multiplier;
10812 spin_unlock(&tp->lock);
10815 tp->timer.expires = jiffies + tp->timer_offset;
10816 add_timer(&tp->timer);
10819 static void tg3_timer_init(struct tg3 *tp)
10821 if (tg3_flag(tp, TAGGED_STATUS) &&
10822 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10823 !tg3_flag(tp, 57765_CLASS))
10824 tp->timer_offset = HZ;
10826 tp->timer_offset = HZ / 10;
10828 BUG_ON(tp->timer_offset > HZ);
10830 tp->timer_multiplier = (HZ / tp->timer_offset);
10831 tp->asf_multiplier = (HZ / tp->timer_offset) *
10832 TG3_FW_UPDATE_FREQ_SEC;
10834 init_timer(&tp->timer);
10835 tp->timer.data = (unsigned long) tp;
10836 tp->timer.function = tg3_timer;
10839 static void tg3_timer_start(struct tg3 *tp)
10841 tp->asf_counter = tp->asf_multiplier;
10842 tp->timer_counter = tp->timer_multiplier;
10844 tp->timer.expires = jiffies + tp->timer_offset;
10845 add_timer(&tp->timer);
10848 static void tg3_timer_stop(struct tg3 *tp)
10850 del_timer_sync(&tp->timer);
10853 /* Restart hardware after configuration changes, self-test, etc.
10854 * Invoked with tp->lock held.
10856 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10857 __releases(tp->lock)
10858 __acquires(tp->lock)
10862 err = tg3_init_hw(tp, reset_phy);
10864 netdev_err(tp->dev,
10865 "Failed to re-initialize device, aborting\n");
10866 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10867 tg3_full_unlock(tp);
10868 tg3_timer_stop(tp);
10870 tg3_napi_enable(tp);
10871 dev_close(tp->dev);
10872 tg3_full_lock(tp, 0);
10877 static void tg3_reset_task(struct work_struct *work)
10879 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10882 tg3_full_lock(tp, 0);
10884 if (!netif_running(tp->dev)) {
10885 tg3_flag_clear(tp, RESET_TASK_PENDING);
10886 tg3_full_unlock(tp);
10890 tg3_full_unlock(tp);
10894 tg3_netif_stop(tp);
10896 tg3_full_lock(tp, 1);
10898 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10899 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10900 tp->write32_rx_mbox = tg3_write_flush_reg32;
10901 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10902 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10905 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10906 err = tg3_init_hw(tp, true);
10910 tg3_netif_start(tp);
10913 tg3_full_unlock(tp);
10918 tg3_flag_clear(tp, RESET_TASK_PENDING);
10921 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10924 unsigned long flags;
10926 struct tg3_napi *tnapi = &tp->napi[irq_num];
10928 if (tp->irq_cnt == 1)
10929 name = tp->dev->name;
10931 name = &tnapi->irq_lbl[0];
10932 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10933 name[IFNAMSIZ-1] = 0;
10936 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10938 if (tg3_flag(tp, 1SHOT_MSI))
10939 fn = tg3_msi_1shot;
10942 fn = tg3_interrupt;
10943 if (tg3_flag(tp, TAGGED_STATUS))
10944 fn = tg3_interrupt_tagged;
10945 flags = IRQF_SHARED;
10948 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10951 static int tg3_test_interrupt(struct tg3 *tp)
10953 struct tg3_napi *tnapi = &tp->napi[0];
10954 struct net_device *dev = tp->dev;
10955 int err, i, intr_ok = 0;
10958 if (!netif_running(dev))
10961 tg3_disable_ints(tp);
10963 free_irq(tnapi->irq_vec, tnapi);
10966 * Turn off MSI one shot mode. Otherwise this test has no
10967 * observable way to know whether the interrupt was delivered.
10969 if (tg3_flag(tp, 57765_PLUS)) {
10970 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10971 tw32(MSGINT_MODE, val);
10974 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10975 IRQF_SHARED, dev->name, tnapi);
10979 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10980 tg3_enable_ints(tp);
10982 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10985 for (i = 0; i < 5; i++) {
10986 u32 int_mbox, misc_host_ctrl;
10988 int_mbox = tr32_mailbox(tnapi->int_mbox);
10989 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10991 if ((int_mbox != 0) ||
10992 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10997 if (tg3_flag(tp, 57765_PLUS) &&
10998 tnapi->hw_status->status_tag != tnapi->last_tag)
10999 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11004 tg3_disable_ints(tp);
11006 free_irq(tnapi->irq_vec, tnapi);
11008 err = tg3_request_irq(tp, 0);
11014 /* Reenable MSI one shot mode. */
11015 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11016 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11017 tw32(MSGINT_MODE, val);
11025 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11026 * successfully restored
11028 static int tg3_test_msi(struct tg3 *tp)
11033 if (!tg3_flag(tp, USING_MSI))
11036 /* Turn off SERR reporting in case MSI terminates with Master
11039 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11040 pci_write_config_word(tp->pdev, PCI_COMMAND,
11041 pci_cmd & ~PCI_COMMAND_SERR);
11043 err = tg3_test_interrupt(tp);
11045 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11050 /* other failures */
11054 /* MSI test failed, go back to INTx mode */
11055 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11056 "to INTx mode. Please report this failure to the PCI "
11057 "maintainer and include system chipset information\n");
11059 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11061 pci_disable_msi(tp->pdev);
11063 tg3_flag_clear(tp, USING_MSI);
11064 tp->napi[0].irq_vec = tp->pdev->irq;
11066 err = tg3_request_irq(tp, 0);
11070 /* Need to reset the chip because the MSI cycle may have terminated
11071 * with Master Abort.
11073 tg3_full_lock(tp, 1);
11075 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11076 err = tg3_init_hw(tp, true);
11078 tg3_full_unlock(tp);
11081 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11086 static int tg3_request_firmware(struct tg3 *tp)
11088 const struct tg3_firmware_hdr *fw_hdr;
11090 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11091 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11096 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11098 /* Firmware blob starts with version numbers, followed by
11099 * start address and _full_ length including BSS sections
11100 * (which must be longer than the actual data, of course
11103 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11104 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11105 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11106 tp->fw_len, tp->fw_needed);
11107 release_firmware(tp->fw);
11112 /* We no longer need firmware; we have it. */
11113 tp->fw_needed = NULL;
11117 static u32 tg3_irq_count(struct tg3 *tp)
11119 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11122 /* We want as many rx rings enabled as there are cpus.
11123 * In multiqueue MSI-X mode, the first MSI-X vector
11124 * only deals with link interrupts, etc, so we add
11125 * one to the number of vectors we are requesting.
11127 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11133 static bool tg3_enable_msix(struct tg3 *tp)
11136 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11138 tp->txq_cnt = tp->txq_req;
11139 tp->rxq_cnt = tp->rxq_req;
11141 tp->rxq_cnt = netif_get_num_default_rss_queues();
11142 if (tp->rxq_cnt > tp->rxq_max)
11143 tp->rxq_cnt = tp->rxq_max;
11145 /* Disable multiple TX rings by default. Simple round-robin hardware
11146 * scheduling of the TX rings can cause starvation of rings with
11147 * small packets when other rings have TSO or jumbo packets.
11152 tp->irq_cnt = tg3_irq_count(tp);
11154 for (i = 0; i < tp->irq_max; i++) {
11155 msix_ent[i].entry = i;
11156 msix_ent[i].vector = 0;
11159 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11162 } else if (rc != 0) {
11163 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11165 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11168 tp->rxq_cnt = max(rc - 1, 1);
11170 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11173 for (i = 0; i < tp->irq_max; i++)
11174 tp->napi[i].irq_vec = msix_ent[i].vector;
11176 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11177 pci_disable_msix(tp->pdev);
11181 if (tp->irq_cnt == 1)
11184 tg3_flag_set(tp, ENABLE_RSS);
11186 if (tp->txq_cnt > 1)
11187 tg3_flag_set(tp, ENABLE_TSS);
11189 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11194 static void tg3_ints_init(struct tg3 *tp)
11196 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11197 !tg3_flag(tp, TAGGED_STATUS)) {
11198 /* All MSI supporting chips should support tagged
11199 * status. Assert that this is the case.
11201 netdev_warn(tp->dev,
11202 "MSI without TAGGED_STATUS? Not using MSI\n");
11206 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11207 tg3_flag_set(tp, USING_MSIX);
11208 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11209 tg3_flag_set(tp, USING_MSI);
11211 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11212 u32 msi_mode = tr32(MSGINT_MODE);
11213 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11214 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11215 if (!tg3_flag(tp, 1SHOT_MSI))
11216 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11217 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11220 if (!tg3_flag(tp, USING_MSIX)) {
11222 tp->napi[0].irq_vec = tp->pdev->irq;
11225 if (tp->irq_cnt == 1) {
11228 netif_set_real_num_tx_queues(tp->dev, 1);
11229 netif_set_real_num_rx_queues(tp->dev, 1);
11233 static void tg3_ints_fini(struct tg3 *tp)
11235 if (tg3_flag(tp, USING_MSIX))
11236 pci_disable_msix(tp->pdev);
11237 else if (tg3_flag(tp, USING_MSI))
11238 pci_disable_msi(tp->pdev);
11239 tg3_flag_clear(tp, USING_MSI);
11240 tg3_flag_clear(tp, USING_MSIX);
11241 tg3_flag_clear(tp, ENABLE_RSS);
11242 tg3_flag_clear(tp, ENABLE_TSS);
11245 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11248 struct net_device *dev = tp->dev;
11252 * Setup interrupts first so we know how
11253 * many NAPI resources to allocate
11257 tg3_rss_check_indir_tbl(tp);
11259 /* The placement of this call is tied
11260 * to the setup and use of Host TX descriptors.
11262 err = tg3_alloc_consistent(tp);
11264 goto out_ints_fini;
11268 tg3_napi_enable(tp);
11270 for (i = 0; i < tp->irq_cnt; i++) {
11271 struct tg3_napi *tnapi = &tp->napi[i];
11272 err = tg3_request_irq(tp, i);
11274 for (i--; i >= 0; i--) {
11275 tnapi = &tp->napi[i];
11276 free_irq(tnapi->irq_vec, tnapi);
11278 goto out_napi_fini;
11282 tg3_full_lock(tp, 0);
11285 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11287 err = tg3_init_hw(tp, reset_phy);
11289 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11290 tg3_free_rings(tp);
11293 tg3_full_unlock(tp);
11298 if (test_irq && tg3_flag(tp, USING_MSI)) {
11299 err = tg3_test_msi(tp);
11302 tg3_full_lock(tp, 0);
11303 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11304 tg3_free_rings(tp);
11305 tg3_full_unlock(tp);
11307 goto out_napi_fini;
11310 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11311 u32 val = tr32(PCIE_TRANSACTION_CFG);
11313 tw32(PCIE_TRANSACTION_CFG,
11314 val | PCIE_TRANS_CFG_1SHOT_MSI);
11320 tg3_hwmon_open(tp);
11322 tg3_full_lock(tp, 0);
11324 tg3_timer_start(tp);
11325 tg3_flag_set(tp, INIT_COMPLETE);
11326 tg3_enable_ints(tp);
11331 tg3_ptp_resume(tp);
11334 tg3_full_unlock(tp);
11336 netif_tx_start_all_queues(dev);
11339 * Reset loopback feature if it was turned on while the device was down
11340 * make sure that it's installed properly now.
11342 if (dev->features & NETIF_F_LOOPBACK)
11343 tg3_set_loopback(dev, dev->features);
11348 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11349 struct tg3_napi *tnapi = &tp->napi[i];
11350 free_irq(tnapi->irq_vec, tnapi);
11354 tg3_napi_disable(tp);
11356 tg3_free_consistent(tp);
11364 static void tg3_stop(struct tg3 *tp)
11368 tg3_reset_task_cancel(tp);
11369 tg3_netif_stop(tp);
11371 tg3_timer_stop(tp);
11373 tg3_hwmon_close(tp);
11377 tg3_full_lock(tp, 1);
11379 tg3_disable_ints(tp);
11381 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11382 tg3_free_rings(tp);
11383 tg3_flag_clear(tp, INIT_COMPLETE);
11385 tg3_full_unlock(tp);
11387 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11388 struct tg3_napi *tnapi = &tp->napi[i];
11389 free_irq(tnapi->irq_vec, tnapi);
11396 tg3_free_consistent(tp);
11399 static int tg3_open(struct net_device *dev)
11401 struct tg3 *tp = netdev_priv(dev);
11404 if (tp->fw_needed) {
11405 err = tg3_request_firmware(tp);
11406 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11408 netdev_warn(tp->dev, "EEE capability disabled\n");
11409 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11410 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11411 netdev_warn(tp->dev, "EEE capability restored\n");
11412 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11414 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11418 netdev_warn(tp->dev, "TSO capability disabled\n");
11419 tg3_flag_clear(tp, TSO_CAPABLE);
11420 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11421 netdev_notice(tp->dev, "TSO capability restored\n");
11422 tg3_flag_set(tp, TSO_CAPABLE);
11426 tg3_carrier_off(tp);
11428 err = tg3_power_up(tp);
11432 tg3_full_lock(tp, 0);
11434 tg3_disable_ints(tp);
11435 tg3_flag_clear(tp, INIT_COMPLETE);
11437 tg3_full_unlock(tp);
11439 err = tg3_start(tp,
11440 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11443 tg3_frob_aux_power(tp, false);
11444 pci_set_power_state(tp->pdev, PCI_D3hot);
11447 if (tg3_flag(tp, PTP_CAPABLE)) {
11448 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11450 if (IS_ERR(tp->ptp_clock))
11451 tp->ptp_clock = NULL;
11457 static int tg3_close(struct net_device *dev)
11459 struct tg3 *tp = netdev_priv(dev);
11465 /* Clear stats across close / open calls */
11466 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11467 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11469 tg3_power_down(tp);
11471 tg3_carrier_off(tp);
11476 static inline u64 get_stat64(tg3_stat64_t *val)
11478 return ((u64)val->high << 32) | ((u64)val->low);
11481 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11483 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11485 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11486 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11487 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11490 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11491 tg3_writephy(tp, MII_TG3_TEST1,
11492 val | MII_TG3_TEST1_CRC_EN);
11493 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11497 tp->phy_crc_errors += val;
11499 return tp->phy_crc_errors;
11502 return get_stat64(&hw_stats->rx_fcs_errors);
11505 #define ESTAT_ADD(member) \
11506 estats->member = old_estats->member + \
11507 get_stat64(&hw_stats->member)
11509 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11511 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11512 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11514 ESTAT_ADD(rx_octets);
11515 ESTAT_ADD(rx_fragments);
11516 ESTAT_ADD(rx_ucast_packets);
11517 ESTAT_ADD(rx_mcast_packets);
11518 ESTAT_ADD(rx_bcast_packets);
11519 ESTAT_ADD(rx_fcs_errors);
11520 ESTAT_ADD(rx_align_errors);
11521 ESTAT_ADD(rx_xon_pause_rcvd);
11522 ESTAT_ADD(rx_xoff_pause_rcvd);
11523 ESTAT_ADD(rx_mac_ctrl_rcvd);
11524 ESTAT_ADD(rx_xoff_entered);
11525 ESTAT_ADD(rx_frame_too_long_errors);
11526 ESTAT_ADD(rx_jabbers);
11527 ESTAT_ADD(rx_undersize_packets);
11528 ESTAT_ADD(rx_in_length_errors);
11529 ESTAT_ADD(rx_out_length_errors);
11530 ESTAT_ADD(rx_64_or_less_octet_packets);
11531 ESTAT_ADD(rx_65_to_127_octet_packets);
11532 ESTAT_ADD(rx_128_to_255_octet_packets);
11533 ESTAT_ADD(rx_256_to_511_octet_packets);
11534 ESTAT_ADD(rx_512_to_1023_octet_packets);
11535 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11536 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11537 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11538 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11539 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11541 ESTAT_ADD(tx_octets);
11542 ESTAT_ADD(tx_collisions);
11543 ESTAT_ADD(tx_xon_sent);
11544 ESTAT_ADD(tx_xoff_sent);
11545 ESTAT_ADD(tx_flow_control);
11546 ESTAT_ADD(tx_mac_errors);
11547 ESTAT_ADD(tx_single_collisions);
11548 ESTAT_ADD(tx_mult_collisions);
11549 ESTAT_ADD(tx_deferred);
11550 ESTAT_ADD(tx_excessive_collisions);
11551 ESTAT_ADD(tx_late_collisions);
11552 ESTAT_ADD(tx_collide_2times);
11553 ESTAT_ADD(tx_collide_3times);
11554 ESTAT_ADD(tx_collide_4times);
11555 ESTAT_ADD(tx_collide_5times);
11556 ESTAT_ADD(tx_collide_6times);
11557 ESTAT_ADD(tx_collide_7times);
11558 ESTAT_ADD(tx_collide_8times);
11559 ESTAT_ADD(tx_collide_9times);
11560 ESTAT_ADD(tx_collide_10times);
11561 ESTAT_ADD(tx_collide_11times);
11562 ESTAT_ADD(tx_collide_12times);
11563 ESTAT_ADD(tx_collide_13times);
11564 ESTAT_ADD(tx_collide_14times);
11565 ESTAT_ADD(tx_collide_15times);
11566 ESTAT_ADD(tx_ucast_packets);
11567 ESTAT_ADD(tx_mcast_packets);
11568 ESTAT_ADD(tx_bcast_packets);
11569 ESTAT_ADD(tx_carrier_sense_errors);
11570 ESTAT_ADD(tx_discards);
11571 ESTAT_ADD(tx_errors);
11573 ESTAT_ADD(dma_writeq_full);
11574 ESTAT_ADD(dma_write_prioq_full);
11575 ESTAT_ADD(rxbds_empty);
11576 ESTAT_ADD(rx_discards);
11577 ESTAT_ADD(rx_errors);
11578 ESTAT_ADD(rx_threshold_hit);
11580 ESTAT_ADD(dma_readq_full);
11581 ESTAT_ADD(dma_read_prioq_full);
11582 ESTAT_ADD(tx_comp_queue_full);
11584 ESTAT_ADD(ring_set_send_prod_index);
11585 ESTAT_ADD(ring_status_update);
11586 ESTAT_ADD(nic_irqs);
11587 ESTAT_ADD(nic_avoided_irqs);
11588 ESTAT_ADD(nic_tx_threshold_hit);
11590 ESTAT_ADD(mbuf_lwm_thresh_hit);
11593 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11595 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11596 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11598 stats->rx_packets = old_stats->rx_packets +
11599 get_stat64(&hw_stats->rx_ucast_packets) +
11600 get_stat64(&hw_stats->rx_mcast_packets) +
11601 get_stat64(&hw_stats->rx_bcast_packets);
11603 stats->tx_packets = old_stats->tx_packets +
11604 get_stat64(&hw_stats->tx_ucast_packets) +
11605 get_stat64(&hw_stats->tx_mcast_packets) +
11606 get_stat64(&hw_stats->tx_bcast_packets);
11608 stats->rx_bytes = old_stats->rx_bytes +
11609 get_stat64(&hw_stats->rx_octets);
11610 stats->tx_bytes = old_stats->tx_bytes +
11611 get_stat64(&hw_stats->tx_octets);
11613 stats->rx_errors = old_stats->rx_errors +
11614 get_stat64(&hw_stats->rx_errors);
11615 stats->tx_errors = old_stats->tx_errors +
11616 get_stat64(&hw_stats->tx_errors) +
11617 get_stat64(&hw_stats->tx_mac_errors) +
11618 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11619 get_stat64(&hw_stats->tx_discards);
11621 stats->multicast = old_stats->multicast +
11622 get_stat64(&hw_stats->rx_mcast_packets);
11623 stats->collisions = old_stats->collisions +
11624 get_stat64(&hw_stats->tx_collisions);
11626 stats->rx_length_errors = old_stats->rx_length_errors +
11627 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11628 get_stat64(&hw_stats->rx_undersize_packets);
11630 stats->rx_over_errors = old_stats->rx_over_errors +
11631 get_stat64(&hw_stats->rxbds_empty);
11632 stats->rx_frame_errors = old_stats->rx_frame_errors +
11633 get_stat64(&hw_stats->rx_align_errors);
11634 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11635 get_stat64(&hw_stats->tx_discards);
11636 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11637 get_stat64(&hw_stats->tx_carrier_sense_errors);
11639 stats->rx_crc_errors = old_stats->rx_crc_errors +
11640 tg3_calc_crc_errors(tp);
11642 stats->rx_missed_errors = old_stats->rx_missed_errors +
11643 get_stat64(&hw_stats->rx_discards);
11645 stats->rx_dropped = tp->rx_dropped;
11646 stats->tx_dropped = tp->tx_dropped;
11649 static int tg3_get_regs_len(struct net_device *dev)
11651 return TG3_REG_BLK_SIZE;
11654 static void tg3_get_regs(struct net_device *dev,
11655 struct ethtool_regs *regs, void *_p)
11657 struct tg3 *tp = netdev_priv(dev);
11661 memset(_p, 0, TG3_REG_BLK_SIZE);
11663 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11666 tg3_full_lock(tp, 0);
11668 tg3_dump_legacy_regs(tp, (u32 *)_p);
11670 tg3_full_unlock(tp);
11673 static int tg3_get_eeprom_len(struct net_device *dev)
11675 struct tg3 *tp = netdev_priv(dev);
11677 return tp->nvram_size;
11680 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11682 struct tg3 *tp = netdev_priv(dev);
11685 u32 i, offset, len, b_offset, b_count;
11688 if (tg3_flag(tp, NO_NVRAM))
11691 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11694 offset = eeprom->offset;
11698 eeprom->magic = TG3_EEPROM_MAGIC;
11701 /* adjustments to start on required 4 byte boundary */
11702 b_offset = offset & 3;
11703 b_count = 4 - b_offset;
11704 if (b_count > len) {
11705 /* i.e. offset=1 len=2 */
11708 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11711 memcpy(data, ((char *)&val) + b_offset, b_count);
11714 eeprom->len += b_count;
11717 /* read bytes up to the last 4 byte boundary */
11718 pd = &data[eeprom->len];
11719 for (i = 0; i < (len - (len & 3)); i += 4) {
11720 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11725 memcpy(pd + i, &val, 4);
11730 /* read last bytes not ending on 4 byte boundary */
11731 pd = &data[eeprom->len];
11733 b_offset = offset + len - b_count;
11734 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11737 memcpy(pd, &val, b_count);
11738 eeprom->len += b_count;
11743 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11745 struct tg3 *tp = netdev_priv(dev);
11747 u32 offset, len, b_offset, odd_len;
11751 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11754 if (tg3_flag(tp, NO_NVRAM) ||
11755 eeprom->magic != TG3_EEPROM_MAGIC)
11758 offset = eeprom->offset;
11761 if ((b_offset = (offset & 3))) {
11762 /* adjustments to start on required 4 byte boundary */
11763 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11774 /* adjustments to end on required 4 byte boundary */
11776 len = (len + 3) & ~3;
11777 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11783 if (b_offset || odd_len) {
11784 buf = kmalloc(len, GFP_KERNEL);
11788 memcpy(buf, &start, 4);
11790 memcpy(buf+len-4, &end, 4);
11791 memcpy(buf + b_offset, data, eeprom->len);
11794 ret = tg3_nvram_write_block(tp, offset, len, buf);
11802 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11804 struct tg3 *tp = netdev_priv(dev);
11806 if (tg3_flag(tp, USE_PHYLIB)) {
11807 struct phy_device *phydev;
11808 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11810 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11811 return phy_ethtool_gset(phydev, cmd);
11814 cmd->supported = (SUPPORTED_Autoneg);
11816 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11817 cmd->supported |= (SUPPORTED_1000baseT_Half |
11818 SUPPORTED_1000baseT_Full);
11820 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11821 cmd->supported |= (SUPPORTED_100baseT_Half |
11822 SUPPORTED_100baseT_Full |
11823 SUPPORTED_10baseT_Half |
11824 SUPPORTED_10baseT_Full |
11826 cmd->port = PORT_TP;
11828 cmd->supported |= SUPPORTED_FIBRE;
11829 cmd->port = PORT_FIBRE;
11832 cmd->advertising = tp->link_config.advertising;
11833 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11834 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11835 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11836 cmd->advertising |= ADVERTISED_Pause;
11838 cmd->advertising |= ADVERTISED_Pause |
11839 ADVERTISED_Asym_Pause;
11841 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11842 cmd->advertising |= ADVERTISED_Asym_Pause;
11845 if (netif_running(dev) && tp->link_up) {
11846 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11847 cmd->duplex = tp->link_config.active_duplex;
11848 cmd->lp_advertising = tp->link_config.rmt_adv;
11849 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11850 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11851 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11853 cmd->eth_tp_mdix = ETH_TP_MDI;
11856 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11857 cmd->duplex = DUPLEX_UNKNOWN;
11858 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11860 cmd->phy_address = tp->phy_addr;
11861 cmd->transceiver = XCVR_INTERNAL;
11862 cmd->autoneg = tp->link_config.autoneg;
11868 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11870 struct tg3 *tp = netdev_priv(dev);
11871 u32 speed = ethtool_cmd_speed(cmd);
11873 if (tg3_flag(tp, USE_PHYLIB)) {
11874 struct phy_device *phydev;
11875 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11877 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11878 return phy_ethtool_sset(phydev, cmd);
11881 if (cmd->autoneg != AUTONEG_ENABLE &&
11882 cmd->autoneg != AUTONEG_DISABLE)
11885 if (cmd->autoneg == AUTONEG_DISABLE &&
11886 cmd->duplex != DUPLEX_FULL &&
11887 cmd->duplex != DUPLEX_HALF)
11890 if (cmd->autoneg == AUTONEG_ENABLE) {
11891 u32 mask = ADVERTISED_Autoneg |
11893 ADVERTISED_Asym_Pause;
11895 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11896 mask |= ADVERTISED_1000baseT_Half |
11897 ADVERTISED_1000baseT_Full;
11899 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11900 mask |= ADVERTISED_100baseT_Half |
11901 ADVERTISED_100baseT_Full |
11902 ADVERTISED_10baseT_Half |
11903 ADVERTISED_10baseT_Full |
11906 mask |= ADVERTISED_FIBRE;
11908 if (cmd->advertising & ~mask)
11911 mask &= (ADVERTISED_1000baseT_Half |
11912 ADVERTISED_1000baseT_Full |
11913 ADVERTISED_100baseT_Half |
11914 ADVERTISED_100baseT_Full |
11915 ADVERTISED_10baseT_Half |
11916 ADVERTISED_10baseT_Full);
11918 cmd->advertising &= mask;
11920 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11921 if (speed != SPEED_1000)
11924 if (cmd->duplex != DUPLEX_FULL)
11927 if (speed != SPEED_100 &&
11933 tg3_full_lock(tp, 0);
11935 tp->link_config.autoneg = cmd->autoneg;
11936 if (cmd->autoneg == AUTONEG_ENABLE) {
11937 tp->link_config.advertising = (cmd->advertising |
11938 ADVERTISED_Autoneg);
11939 tp->link_config.speed = SPEED_UNKNOWN;
11940 tp->link_config.duplex = DUPLEX_UNKNOWN;
11942 tp->link_config.advertising = 0;
11943 tp->link_config.speed = speed;
11944 tp->link_config.duplex = cmd->duplex;
11947 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11949 tg3_warn_mgmt_link_flap(tp);
11951 if (netif_running(dev))
11952 tg3_setup_phy(tp, true);
11954 tg3_full_unlock(tp);
11959 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11961 struct tg3 *tp = netdev_priv(dev);
11963 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11964 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11965 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11966 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11969 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11971 struct tg3 *tp = netdev_priv(dev);
11973 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11974 wol->supported = WAKE_MAGIC;
11976 wol->supported = 0;
11978 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11979 wol->wolopts = WAKE_MAGIC;
11980 memset(&wol->sopass, 0, sizeof(wol->sopass));
11983 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11985 struct tg3 *tp = netdev_priv(dev);
11986 struct device *dp = &tp->pdev->dev;
11988 if (wol->wolopts & ~WAKE_MAGIC)
11990 if ((wol->wolopts & WAKE_MAGIC) &&
11991 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11994 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11996 spin_lock_bh(&tp->lock);
11997 if (device_may_wakeup(dp))
11998 tg3_flag_set(tp, WOL_ENABLE);
12000 tg3_flag_clear(tp, WOL_ENABLE);
12001 spin_unlock_bh(&tp->lock);
12006 static u32 tg3_get_msglevel(struct net_device *dev)
12008 struct tg3 *tp = netdev_priv(dev);
12009 return tp->msg_enable;
12012 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12014 struct tg3 *tp = netdev_priv(dev);
12015 tp->msg_enable = value;
12018 static int tg3_nway_reset(struct net_device *dev)
12020 struct tg3 *tp = netdev_priv(dev);
12023 if (!netif_running(dev))
12026 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12029 tg3_warn_mgmt_link_flap(tp);
12031 if (tg3_flag(tp, USE_PHYLIB)) {
12032 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12034 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
12038 spin_lock_bh(&tp->lock);
12040 tg3_readphy(tp, MII_BMCR, &bmcr);
12041 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12042 ((bmcr & BMCR_ANENABLE) ||
12043 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12044 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12048 spin_unlock_bh(&tp->lock);
12054 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12056 struct tg3 *tp = netdev_priv(dev);
12058 ering->rx_max_pending = tp->rx_std_ring_mask;
12059 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12060 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12062 ering->rx_jumbo_max_pending = 0;
12064 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12066 ering->rx_pending = tp->rx_pending;
12067 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12068 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12070 ering->rx_jumbo_pending = 0;
12072 ering->tx_pending = tp->napi[0].tx_pending;
12075 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12077 struct tg3 *tp = netdev_priv(dev);
12078 int i, irq_sync = 0, err = 0;
12080 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12081 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12082 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12083 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12084 (tg3_flag(tp, TSO_BUG) &&
12085 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12088 if (netif_running(dev)) {
12090 tg3_netif_stop(tp);
12094 tg3_full_lock(tp, irq_sync);
12096 tp->rx_pending = ering->rx_pending;
12098 if (tg3_flag(tp, MAX_RXPEND_64) &&
12099 tp->rx_pending > 63)
12100 tp->rx_pending = 63;
12101 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12103 for (i = 0; i < tp->irq_max; i++)
12104 tp->napi[i].tx_pending = ering->tx_pending;
12106 if (netif_running(dev)) {
12107 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12108 err = tg3_restart_hw(tp, false);
12110 tg3_netif_start(tp);
12113 tg3_full_unlock(tp);
12115 if (irq_sync && !err)
12121 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12123 struct tg3 *tp = netdev_priv(dev);
12125 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12127 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12128 epause->rx_pause = 1;
12130 epause->rx_pause = 0;
12132 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12133 epause->tx_pause = 1;
12135 epause->tx_pause = 0;
12138 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12140 struct tg3 *tp = netdev_priv(dev);
12143 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12144 tg3_warn_mgmt_link_flap(tp);
12146 if (tg3_flag(tp, USE_PHYLIB)) {
12148 struct phy_device *phydev;
12150 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12152 if (!(phydev->supported & SUPPORTED_Pause) ||
12153 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12154 (epause->rx_pause != epause->tx_pause)))
12157 tp->link_config.flowctrl = 0;
12158 if (epause->rx_pause) {
12159 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12161 if (epause->tx_pause) {
12162 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12163 newadv = ADVERTISED_Pause;
12165 newadv = ADVERTISED_Pause |
12166 ADVERTISED_Asym_Pause;
12167 } else if (epause->tx_pause) {
12168 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12169 newadv = ADVERTISED_Asym_Pause;
12173 if (epause->autoneg)
12174 tg3_flag_set(tp, PAUSE_AUTONEG);
12176 tg3_flag_clear(tp, PAUSE_AUTONEG);
12178 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12179 u32 oldadv = phydev->advertising &
12180 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12181 if (oldadv != newadv) {
12182 phydev->advertising &=
12183 ~(ADVERTISED_Pause |
12184 ADVERTISED_Asym_Pause);
12185 phydev->advertising |= newadv;
12186 if (phydev->autoneg) {
12188 * Always renegotiate the link to
12189 * inform our link partner of our
12190 * flow control settings, even if the
12191 * flow control is forced. Let
12192 * tg3_adjust_link() do the final
12193 * flow control setup.
12195 return phy_start_aneg(phydev);
12199 if (!epause->autoneg)
12200 tg3_setup_flow_control(tp, 0, 0);
12202 tp->link_config.advertising &=
12203 ~(ADVERTISED_Pause |
12204 ADVERTISED_Asym_Pause);
12205 tp->link_config.advertising |= newadv;
12210 if (netif_running(dev)) {
12211 tg3_netif_stop(tp);
12215 tg3_full_lock(tp, irq_sync);
12217 if (epause->autoneg)
12218 tg3_flag_set(tp, PAUSE_AUTONEG);
12220 tg3_flag_clear(tp, PAUSE_AUTONEG);
12221 if (epause->rx_pause)
12222 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12224 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12225 if (epause->tx_pause)
12226 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12228 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12230 if (netif_running(dev)) {
12231 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12232 err = tg3_restart_hw(tp, false);
12234 tg3_netif_start(tp);
12237 tg3_full_unlock(tp);
12240 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12245 static int tg3_get_sset_count(struct net_device *dev, int sset)
12249 return TG3_NUM_TEST;
12251 return TG3_NUM_STATS;
12253 return -EOPNOTSUPP;
12257 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12258 u32 *rules __always_unused)
12260 struct tg3 *tp = netdev_priv(dev);
12262 if (!tg3_flag(tp, SUPPORT_MSIX))
12263 return -EOPNOTSUPP;
12265 switch (info->cmd) {
12266 case ETHTOOL_GRXRINGS:
12267 if (netif_running(tp->dev))
12268 info->data = tp->rxq_cnt;
12270 info->data = num_online_cpus();
12271 if (info->data > TG3_RSS_MAX_NUM_QS)
12272 info->data = TG3_RSS_MAX_NUM_QS;
12275 /* The first interrupt vector only
12276 * handles link interrupts.
12282 return -EOPNOTSUPP;
12286 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12289 struct tg3 *tp = netdev_priv(dev);
12291 if (tg3_flag(tp, SUPPORT_MSIX))
12292 size = TG3_RSS_INDIR_TBL_SIZE;
12297 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12299 struct tg3 *tp = netdev_priv(dev);
12302 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12303 indir[i] = tp->rss_ind_tbl[i];
12308 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12310 struct tg3 *tp = netdev_priv(dev);
12313 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12314 tp->rss_ind_tbl[i] = indir[i];
12316 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12319 /* It is legal to write the indirection
12320 * table while the device is running.
12322 tg3_full_lock(tp, 0);
12323 tg3_rss_write_indir_tbl(tp);
12324 tg3_full_unlock(tp);
12329 static void tg3_get_channels(struct net_device *dev,
12330 struct ethtool_channels *channel)
12332 struct tg3 *tp = netdev_priv(dev);
12333 u32 deflt_qs = netif_get_num_default_rss_queues();
12335 channel->max_rx = tp->rxq_max;
12336 channel->max_tx = tp->txq_max;
12338 if (netif_running(dev)) {
12339 channel->rx_count = tp->rxq_cnt;
12340 channel->tx_count = tp->txq_cnt;
12343 channel->rx_count = tp->rxq_req;
12345 channel->rx_count = min(deflt_qs, tp->rxq_max);
12348 channel->tx_count = tp->txq_req;
12350 channel->tx_count = min(deflt_qs, tp->txq_max);
12354 static int tg3_set_channels(struct net_device *dev,
12355 struct ethtool_channels *channel)
12357 struct tg3 *tp = netdev_priv(dev);
12359 if (!tg3_flag(tp, SUPPORT_MSIX))
12360 return -EOPNOTSUPP;
12362 if (channel->rx_count > tp->rxq_max ||
12363 channel->tx_count > tp->txq_max)
12366 tp->rxq_req = channel->rx_count;
12367 tp->txq_req = channel->tx_count;
12369 if (!netif_running(dev))
12374 tg3_carrier_off(tp);
12376 tg3_start(tp, true, false, false);
12381 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12383 switch (stringset) {
12385 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
12388 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
12391 WARN_ON(1); /* we need a WARN() */
12396 static int tg3_set_phys_id(struct net_device *dev,
12397 enum ethtool_phys_id_state state)
12399 struct tg3 *tp = netdev_priv(dev);
12401 if (!netif_running(tp->dev))
12405 case ETHTOOL_ID_ACTIVE:
12406 return 1; /* cycle on/off once per second */
12408 case ETHTOOL_ID_ON:
12409 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12410 LED_CTRL_1000MBPS_ON |
12411 LED_CTRL_100MBPS_ON |
12412 LED_CTRL_10MBPS_ON |
12413 LED_CTRL_TRAFFIC_OVERRIDE |
12414 LED_CTRL_TRAFFIC_BLINK |
12415 LED_CTRL_TRAFFIC_LED);
12418 case ETHTOOL_ID_OFF:
12419 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12420 LED_CTRL_TRAFFIC_OVERRIDE);
12423 case ETHTOOL_ID_INACTIVE:
12424 tw32(MAC_LED_CTRL, tp->led_ctrl);
12431 static void tg3_get_ethtool_stats(struct net_device *dev,
12432 struct ethtool_stats *estats, u64 *tmp_stats)
12434 struct tg3 *tp = netdev_priv(dev);
12437 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12439 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12442 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12446 u32 offset = 0, len = 0;
12449 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12452 if (magic == TG3_EEPROM_MAGIC) {
12453 for (offset = TG3_NVM_DIR_START;
12454 offset < TG3_NVM_DIR_END;
12455 offset += TG3_NVM_DIRENT_SIZE) {
12456 if (tg3_nvram_read(tp, offset, &val))
12459 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12460 TG3_NVM_DIRTYPE_EXTVPD)
12464 if (offset != TG3_NVM_DIR_END) {
12465 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12466 if (tg3_nvram_read(tp, offset + 4, &offset))
12469 offset = tg3_nvram_logical_addr(tp, offset);
12473 if (!offset || !len) {
12474 offset = TG3_NVM_VPD_OFF;
12475 len = TG3_NVM_VPD_LEN;
12478 buf = kmalloc(len, GFP_KERNEL);
12482 if (magic == TG3_EEPROM_MAGIC) {
12483 for (i = 0; i < len; i += 4) {
12484 /* The data is in little-endian format in NVRAM.
12485 * Use the big-endian read routines to preserve
12486 * the byte order as it exists in NVRAM.
12488 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12494 unsigned int pos = 0;
12496 ptr = (u8 *)&buf[0];
12497 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12498 cnt = pci_read_vpd(tp->pdev, pos,
12500 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12518 #define NVRAM_TEST_SIZE 0x100
12519 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12520 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12521 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12522 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12523 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12524 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12525 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12526 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12528 static int tg3_test_nvram(struct tg3 *tp)
12530 u32 csum, magic, len;
12532 int i, j, k, err = 0, size;
12534 if (tg3_flag(tp, NO_NVRAM))
12537 if (tg3_nvram_read(tp, 0, &magic) != 0)
12540 if (magic == TG3_EEPROM_MAGIC)
12541 size = NVRAM_TEST_SIZE;
12542 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12543 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12544 TG3_EEPROM_SB_FORMAT_1) {
12545 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12546 case TG3_EEPROM_SB_REVISION_0:
12547 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12549 case TG3_EEPROM_SB_REVISION_2:
12550 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12552 case TG3_EEPROM_SB_REVISION_3:
12553 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12555 case TG3_EEPROM_SB_REVISION_4:
12556 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12558 case TG3_EEPROM_SB_REVISION_5:
12559 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12561 case TG3_EEPROM_SB_REVISION_6:
12562 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12569 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12570 size = NVRAM_SELFBOOT_HW_SIZE;
12574 buf = kmalloc(size, GFP_KERNEL);
12579 for (i = 0, j = 0; i < size; i += 4, j++) {
12580 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12587 /* Selfboot format */
12588 magic = be32_to_cpu(buf[0]);
12589 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12590 TG3_EEPROM_MAGIC_FW) {
12591 u8 *buf8 = (u8 *) buf, csum8 = 0;
12593 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12594 TG3_EEPROM_SB_REVISION_2) {
12595 /* For rev 2, the csum doesn't include the MBA. */
12596 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12598 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12601 for (i = 0; i < size; i++)
12614 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12615 TG3_EEPROM_MAGIC_HW) {
12616 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12617 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12618 u8 *buf8 = (u8 *) buf;
12620 /* Separate the parity bits and the data bytes. */
12621 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12622 if ((i == 0) || (i == 8)) {
12626 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12627 parity[k++] = buf8[i] & msk;
12629 } else if (i == 16) {
12633 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12634 parity[k++] = buf8[i] & msk;
12637 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12638 parity[k++] = buf8[i] & msk;
12641 data[j++] = buf8[i];
12645 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12646 u8 hw8 = hweight8(data[i]);
12648 if ((hw8 & 0x1) && parity[i])
12650 else if (!(hw8 & 0x1) && !parity[i])
12659 /* Bootstrap checksum at offset 0x10 */
12660 csum = calc_crc((unsigned char *) buf, 0x10);
12661 if (csum != le32_to_cpu(buf[0x10/4]))
12664 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12665 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12666 if (csum != le32_to_cpu(buf[0xfc/4]))
12671 buf = tg3_vpd_readblock(tp, &len);
12675 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12677 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12681 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12684 i += PCI_VPD_LRDT_TAG_SIZE;
12685 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12686 PCI_VPD_RO_KEYWORD_CHKSUM);
12690 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12692 for (i = 0; i <= j; i++)
12693 csum8 += ((u8 *)buf)[i];
12707 #define TG3_SERDES_TIMEOUT_SEC 2
12708 #define TG3_COPPER_TIMEOUT_SEC 6
12710 static int tg3_test_link(struct tg3 *tp)
12714 if (!netif_running(tp->dev))
12717 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12718 max = TG3_SERDES_TIMEOUT_SEC;
12720 max = TG3_COPPER_TIMEOUT_SEC;
12722 for (i = 0; i < max; i++) {
12726 if (msleep_interruptible(1000))
12733 /* Only test the commonly used registers */
12734 static int tg3_test_registers(struct tg3 *tp)
12736 int i, is_5705, is_5750;
12737 u32 offset, read_mask, write_mask, val, save_val, read_val;
12741 #define TG3_FL_5705 0x1
12742 #define TG3_FL_NOT_5705 0x2
12743 #define TG3_FL_NOT_5788 0x4
12744 #define TG3_FL_NOT_5750 0x8
12748 /* MAC Control Registers */
12749 { MAC_MODE, TG3_FL_NOT_5705,
12750 0x00000000, 0x00ef6f8c },
12751 { MAC_MODE, TG3_FL_5705,
12752 0x00000000, 0x01ef6b8c },
12753 { MAC_STATUS, TG3_FL_NOT_5705,
12754 0x03800107, 0x00000000 },
12755 { MAC_STATUS, TG3_FL_5705,
12756 0x03800100, 0x00000000 },
12757 { MAC_ADDR_0_HIGH, 0x0000,
12758 0x00000000, 0x0000ffff },
12759 { MAC_ADDR_0_LOW, 0x0000,
12760 0x00000000, 0xffffffff },
12761 { MAC_RX_MTU_SIZE, 0x0000,
12762 0x00000000, 0x0000ffff },
12763 { MAC_TX_MODE, 0x0000,
12764 0x00000000, 0x00000070 },
12765 { MAC_TX_LENGTHS, 0x0000,
12766 0x00000000, 0x00003fff },
12767 { MAC_RX_MODE, TG3_FL_NOT_5705,
12768 0x00000000, 0x000007fc },
12769 { MAC_RX_MODE, TG3_FL_5705,
12770 0x00000000, 0x000007dc },
12771 { MAC_HASH_REG_0, 0x0000,
12772 0x00000000, 0xffffffff },
12773 { MAC_HASH_REG_1, 0x0000,
12774 0x00000000, 0xffffffff },
12775 { MAC_HASH_REG_2, 0x0000,
12776 0x00000000, 0xffffffff },
12777 { MAC_HASH_REG_3, 0x0000,
12778 0x00000000, 0xffffffff },
12780 /* Receive Data and Receive BD Initiator Control Registers. */
12781 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12782 0x00000000, 0xffffffff },
12783 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12784 0x00000000, 0xffffffff },
12785 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12786 0x00000000, 0x00000003 },
12787 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12788 0x00000000, 0xffffffff },
12789 { RCVDBDI_STD_BD+0, 0x0000,
12790 0x00000000, 0xffffffff },
12791 { RCVDBDI_STD_BD+4, 0x0000,
12792 0x00000000, 0xffffffff },
12793 { RCVDBDI_STD_BD+8, 0x0000,
12794 0x00000000, 0xffff0002 },
12795 { RCVDBDI_STD_BD+0xc, 0x0000,
12796 0x00000000, 0xffffffff },
12798 /* Receive BD Initiator Control Registers. */
12799 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12800 0x00000000, 0xffffffff },
12801 { RCVBDI_STD_THRESH, TG3_FL_5705,
12802 0x00000000, 0x000003ff },
12803 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12804 0x00000000, 0xffffffff },
12806 /* Host Coalescing Control Registers. */
12807 { HOSTCC_MODE, TG3_FL_NOT_5705,
12808 0x00000000, 0x00000004 },
12809 { HOSTCC_MODE, TG3_FL_5705,
12810 0x00000000, 0x000000f6 },
12811 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12812 0x00000000, 0xffffffff },
12813 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12814 0x00000000, 0x000003ff },
12815 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12816 0x00000000, 0xffffffff },
12817 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12818 0x00000000, 0x000003ff },
12819 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12820 0x00000000, 0xffffffff },
12821 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12822 0x00000000, 0x000000ff },
12823 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12824 0x00000000, 0xffffffff },
12825 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12826 0x00000000, 0x000000ff },
12827 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12828 0x00000000, 0xffffffff },
12829 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12830 0x00000000, 0xffffffff },
12831 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12832 0x00000000, 0xffffffff },
12833 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12834 0x00000000, 0x000000ff },
12835 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12836 0x00000000, 0xffffffff },
12837 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12838 0x00000000, 0x000000ff },
12839 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12840 0x00000000, 0xffffffff },
12841 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12842 0x00000000, 0xffffffff },
12843 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12844 0x00000000, 0xffffffff },
12845 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12846 0x00000000, 0xffffffff },
12847 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12848 0x00000000, 0xffffffff },
12849 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12850 0xffffffff, 0x00000000 },
12851 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12852 0xffffffff, 0x00000000 },
12854 /* Buffer Manager Control Registers. */
12855 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12856 0x00000000, 0x007fff80 },
12857 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12858 0x00000000, 0x007fffff },
12859 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12860 0x00000000, 0x0000003f },
12861 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12862 0x00000000, 0x000001ff },
12863 { BUFMGR_MB_HIGH_WATER, 0x0000,
12864 0x00000000, 0x000001ff },
12865 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12866 0xffffffff, 0x00000000 },
12867 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12868 0xffffffff, 0x00000000 },
12870 /* Mailbox Registers */
12871 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12872 0x00000000, 0x000001ff },
12873 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12874 0x00000000, 0x000001ff },
12875 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12876 0x00000000, 0x000007ff },
12877 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12878 0x00000000, 0x000001ff },
12880 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12883 is_5705 = is_5750 = 0;
12884 if (tg3_flag(tp, 5705_PLUS)) {
12886 if (tg3_flag(tp, 5750_PLUS))
12890 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12891 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12894 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12897 if (tg3_flag(tp, IS_5788) &&
12898 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12901 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12904 offset = (u32) reg_tbl[i].offset;
12905 read_mask = reg_tbl[i].read_mask;
12906 write_mask = reg_tbl[i].write_mask;
12908 /* Save the original register content */
12909 save_val = tr32(offset);
12911 /* Determine the read-only value. */
12912 read_val = save_val & read_mask;
12914 /* Write zero to the register, then make sure the read-only bits
12915 * are not changed and the read/write bits are all zeros.
12919 val = tr32(offset);
12921 /* Test the read-only and read/write bits. */
12922 if (((val & read_mask) != read_val) || (val & write_mask))
12925 /* Write ones to all the bits defined by RdMask and WrMask, then
12926 * make sure the read-only bits are not changed and the
12927 * read/write bits are all ones.
12929 tw32(offset, read_mask | write_mask);
12931 val = tr32(offset);
12933 /* Test the read-only bits. */
12934 if ((val & read_mask) != read_val)
12937 /* Test the read/write bits. */
12938 if ((val & write_mask) != write_mask)
12941 tw32(offset, save_val);
12947 if (netif_msg_hw(tp))
12948 netdev_err(tp->dev,
12949 "Register test failed at offset %x\n", offset);
12950 tw32(offset, save_val);
12954 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12956 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12960 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12961 for (j = 0; j < len; j += 4) {
12964 tg3_write_mem(tp, offset + j, test_pattern[i]);
12965 tg3_read_mem(tp, offset + j, &val);
12966 if (val != test_pattern[i])
12973 static int tg3_test_memory(struct tg3 *tp)
12975 static struct mem_entry {
12978 } mem_tbl_570x[] = {
12979 { 0x00000000, 0x00b50},
12980 { 0x00002000, 0x1c000},
12981 { 0xffffffff, 0x00000}
12982 }, mem_tbl_5705[] = {
12983 { 0x00000100, 0x0000c},
12984 { 0x00000200, 0x00008},
12985 { 0x00004000, 0x00800},
12986 { 0x00006000, 0x01000},
12987 { 0x00008000, 0x02000},
12988 { 0x00010000, 0x0e000},
12989 { 0xffffffff, 0x00000}
12990 }, mem_tbl_5755[] = {
12991 { 0x00000200, 0x00008},
12992 { 0x00004000, 0x00800},
12993 { 0x00006000, 0x00800},
12994 { 0x00008000, 0x02000},
12995 { 0x00010000, 0x0c000},
12996 { 0xffffffff, 0x00000}
12997 }, mem_tbl_5906[] = {
12998 { 0x00000200, 0x00008},
12999 { 0x00004000, 0x00400},
13000 { 0x00006000, 0x00400},
13001 { 0x00008000, 0x01000},
13002 { 0x00010000, 0x01000},
13003 { 0xffffffff, 0x00000}
13004 }, mem_tbl_5717[] = {
13005 { 0x00000200, 0x00008},
13006 { 0x00010000, 0x0a000},
13007 { 0x00020000, 0x13c00},
13008 { 0xffffffff, 0x00000}
13009 }, mem_tbl_57765[] = {
13010 { 0x00000200, 0x00008},
13011 { 0x00004000, 0x00800},
13012 { 0x00006000, 0x09800},
13013 { 0x00010000, 0x0a000},
13014 { 0xffffffff, 0x00000}
13016 struct mem_entry *mem_tbl;
13020 if (tg3_flag(tp, 5717_PLUS))
13021 mem_tbl = mem_tbl_5717;
13022 else if (tg3_flag(tp, 57765_CLASS) ||
13023 tg3_asic_rev(tp) == ASIC_REV_5762)
13024 mem_tbl = mem_tbl_57765;
13025 else if (tg3_flag(tp, 5755_PLUS))
13026 mem_tbl = mem_tbl_5755;
13027 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13028 mem_tbl = mem_tbl_5906;
13029 else if (tg3_flag(tp, 5705_PLUS))
13030 mem_tbl = mem_tbl_5705;
13032 mem_tbl = mem_tbl_570x;
13034 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13035 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13043 #define TG3_TSO_MSS 500
13045 #define TG3_TSO_IP_HDR_LEN 20
13046 #define TG3_TSO_TCP_HDR_LEN 20
13047 #define TG3_TSO_TCP_OPT_LEN 12
13049 static const u8 tg3_tso_header[] = {
13051 0x45, 0x00, 0x00, 0x00,
13052 0x00, 0x00, 0x40, 0x00,
13053 0x40, 0x06, 0x00, 0x00,
13054 0x0a, 0x00, 0x00, 0x01,
13055 0x0a, 0x00, 0x00, 0x02,
13056 0x0d, 0x00, 0xe0, 0x00,
13057 0x00, 0x00, 0x01, 0x00,
13058 0x00, 0x00, 0x02, 0x00,
13059 0x80, 0x10, 0x10, 0x00,
13060 0x14, 0x09, 0x00, 0x00,
13061 0x01, 0x01, 0x08, 0x0a,
13062 0x11, 0x11, 0x11, 0x11,
13063 0x11, 0x11, 0x11, 0x11,
13066 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13068 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13069 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13071 struct sk_buff *skb;
13072 u8 *tx_data, *rx_data;
13074 int num_pkts, tx_len, rx_len, i, err;
13075 struct tg3_rx_buffer_desc *desc;
13076 struct tg3_napi *tnapi, *rnapi;
13077 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13079 tnapi = &tp->napi[0];
13080 rnapi = &tp->napi[0];
13081 if (tp->irq_cnt > 1) {
13082 if (tg3_flag(tp, ENABLE_RSS))
13083 rnapi = &tp->napi[1];
13084 if (tg3_flag(tp, ENABLE_TSS))
13085 tnapi = &tp->napi[1];
13087 coal_now = tnapi->coal_now | rnapi->coal_now;
13092 skb = netdev_alloc_skb(tp->dev, tx_len);
13096 tx_data = skb_put(skb, tx_len);
13097 memcpy(tx_data, tp->dev->dev_addr, 6);
13098 memset(tx_data + 6, 0x0, 8);
13100 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13102 if (tso_loopback) {
13103 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13105 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13106 TG3_TSO_TCP_OPT_LEN;
13108 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13109 sizeof(tg3_tso_header));
13112 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13113 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13115 /* Set the total length field in the IP header */
13116 iph->tot_len = htons((u16)(mss + hdr_len));
13118 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13119 TXD_FLAG_CPU_POST_DMA);
13121 if (tg3_flag(tp, HW_TSO_1) ||
13122 tg3_flag(tp, HW_TSO_2) ||
13123 tg3_flag(tp, HW_TSO_3)) {
13125 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13126 th = (struct tcphdr *)&tx_data[val];
13129 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13131 if (tg3_flag(tp, HW_TSO_3)) {
13132 mss |= (hdr_len & 0xc) << 12;
13133 if (hdr_len & 0x10)
13134 base_flags |= 0x00000010;
13135 base_flags |= (hdr_len & 0x3e0) << 5;
13136 } else if (tg3_flag(tp, HW_TSO_2))
13137 mss |= hdr_len << 9;
13138 else if (tg3_flag(tp, HW_TSO_1) ||
13139 tg3_asic_rev(tp) == ASIC_REV_5705) {
13140 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13142 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13145 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13148 data_off = ETH_HLEN;
13150 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13151 tx_len > VLAN_ETH_FRAME_LEN)
13152 base_flags |= TXD_FLAG_JMB_PKT;
13155 for (i = data_off; i < tx_len; i++)
13156 tx_data[i] = (u8) (i & 0xff);
13158 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13159 if (pci_dma_mapping_error(tp->pdev, map)) {
13160 dev_kfree_skb(skb);
13164 val = tnapi->tx_prod;
13165 tnapi->tx_buffers[val].skb = skb;
13166 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13168 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13173 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13175 budget = tg3_tx_avail(tnapi);
13176 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13177 base_flags | TXD_FLAG_END, mss, 0)) {
13178 tnapi->tx_buffers[val].skb = NULL;
13179 dev_kfree_skb(skb);
13185 /* Sync BD data before updating mailbox */
13188 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13189 tr32_mailbox(tnapi->prodmbox);
13193 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13194 for (i = 0; i < 35; i++) {
13195 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13200 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13201 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13202 if ((tx_idx == tnapi->tx_prod) &&
13203 (rx_idx == (rx_start_idx + num_pkts)))
13207 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13208 dev_kfree_skb(skb);
13210 if (tx_idx != tnapi->tx_prod)
13213 if (rx_idx != rx_start_idx + num_pkts)
13217 while (rx_idx != rx_start_idx) {
13218 desc = &rnapi->rx_rcb[rx_start_idx++];
13219 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13220 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13222 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13223 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13226 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13229 if (!tso_loopback) {
13230 if (rx_len != tx_len)
13233 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13234 if (opaque_key != RXD_OPAQUE_RING_STD)
13237 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13240 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13241 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13242 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13246 if (opaque_key == RXD_OPAQUE_RING_STD) {
13247 rx_data = tpr->rx_std_buffers[desc_idx].data;
13248 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13250 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13251 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13252 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13257 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13258 PCI_DMA_FROMDEVICE);
13260 rx_data += TG3_RX_OFFSET(tp);
13261 for (i = data_off; i < rx_len; i++, val++) {
13262 if (*(rx_data + i) != (u8) (val & 0xff))
13269 /* tg3_free_rings will unmap and free the rx_data */
13274 #define TG3_STD_LOOPBACK_FAILED 1
13275 #define TG3_JMB_LOOPBACK_FAILED 2
13276 #define TG3_TSO_LOOPBACK_FAILED 4
13277 #define TG3_LOOPBACK_FAILED \
13278 (TG3_STD_LOOPBACK_FAILED | \
13279 TG3_JMB_LOOPBACK_FAILED | \
13280 TG3_TSO_LOOPBACK_FAILED)
13282 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13286 u32 jmb_pkt_sz = 9000;
13289 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13291 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13292 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13294 if (!netif_running(tp->dev)) {
13295 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13296 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13298 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13302 err = tg3_reset_hw(tp, true);
13304 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13305 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13307 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13311 if (tg3_flag(tp, ENABLE_RSS)) {
13314 /* Reroute all rx packets to the 1st queue */
13315 for (i = MAC_RSS_INDIR_TBL_0;
13316 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13320 /* HW errata - mac loopback fails in some cases on 5780.
13321 * Normal traffic and PHY loopback are not affected by
13322 * errata. Also, the MAC loopback test is deprecated for
13323 * all newer ASIC revisions.
13325 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13326 !tg3_flag(tp, CPMU_PRESENT)) {
13327 tg3_mac_loopback(tp, true);
13329 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13330 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13332 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13333 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13334 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13336 tg3_mac_loopback(tp, false);
13339 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13340 !tg3_flag(tp, USE_PHYLIB)) {
13343 tg3_phy_lpbk_set(tp, 0, false);
13345 /* Wait for link */
13346 for (i = 0; i < 100; i++) {
13347 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13352 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13353 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13354 if (tg3_flag(tp, TSO_CAPABLE) &&
13355 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13356 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13357 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13358 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13359 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13362 tg3_phy_lpbk_set(tp, 0, true);
13364 /* All link indications report up, but the hardware
13365 * isn't really ready for about 20 msec. Double it
13370 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13371 data[TG3_EXT_LOOPB_TEST] |=
13372 TG3_STD_LOOPBACK_FAILED;
13373 if (tg3_flag(tp, TSO_CAPABLE) &&
13374 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13375 data[TG3_EXT_LOOPB_TEST] |=
13376 TG3_TSO_LOOPBACK_FAILED;
13377 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13378 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13379 data[TG3_EXT_LOOPB_TEST] |=
13380 TG3_JMB_LOOPBACK_FAILED;
13383 /* Re-enable gphy autopowerdown. */
13384 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13385 tg3_phy_toggle_apd(tp, true);
13388 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13389 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13392 tp->phy_flags |= eee_cap;
13397 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13400 struct tg3 *tp = netdev_priv(dev);
13401 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13403 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13404 if (tg3_power_up(tp)) {
13405 etest->flags |= ETH_TEST_FL_FAILED;
13406 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13409 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13412 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13414 if (tg3_test_nvram(tp) != 0) {
13415 etest->flags |= ETH_TEST_FL_FAILED;
13416 data[TG3_NVRAM_TEST] = 1;
13418 if (!doextlpbk && tg3_test_link(tp)) {
13419 etest->flags |= ETH_TEST_FL_FAILED;
13420 data[TG3_LINK_TEST] = 1;
13422 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13423 int err, err2 = 0, irq_sync = 0;
13425 if (netif_running(dev)) {
13427 tg3_netif_stop(tp);
13431 tg3_full_lock(tp, irq_sync);
13432 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13433 err = tg3_nvram_lock(tp);
13434 tg3_halt_cpu(tp, RX_CPU_BASE);
13435 if (!tg3_flag(tp, 5705_PLUS))
13436 tg3_halt_cpu(tp, TX_CPU_BASE);
13438 tg3_nvram_unlock(tp);
13440 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13443 if (tg3_test_registers(tp) != 0) {
13444 etest->flags |= ETH_TEST_FL_FAILED;
13445 data[TG3_REGISTER_TEST] = 1;
13448 if (tg3_test_memory(tp) != 0) {
13449 etest->flags |= ETH_TEST_FL_FAILED;
13450 data[TG3_MEMORY_TEST] = 1;
13454 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13456 if (tg3_test_loopback(tp, data, doextlpbk))
13457 etest->flags |= ETH_TEST_FL_FAILED;
13459 tg3_full_unlock(tp);
13461 if (tg3_test_interrupt(tp) != 0) {
13462 etest->flags |= ETH_TEST_FL_FAILED;
13463 data[TG3_INTERRUPT_TEST] = 1;
13466 tg3_full_lock(tp, 0);
13468 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13469 if (netif_running(dev)) {
13470 tg3_flag_set(tp, INIT_COMPLETE);
13471 err2 = tg3_restart_hw(tp, true);
13473 tg3_netif_start(tp);
13476 tg3_full_unlock(tp);
13478 if (irq_sync && !err2)
13481 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13482 tg3_power_down(tp);
13486 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13487 struct ifreq *ifr, int cmd)
13489 struct tg3 *tp = netdev_priv(dev);
13490 struct hwtstamp_config stmpconf;
13492 if (!tg3_flag(tp, PTP_CAPABLE))
13495 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13498 if (stmpconf.flags)
13501 switch (stmpconf.tx_type) {
13502 case HWTSTAMP_TX_ON:
13503 tg3_flag_set(tp, TX_TSTAMP_EN);
13505 case HWTSTAMP_TX_OFF:
13506 tg3_flag_clear(tp, TX_TSTAMP_EN);
13512 switch (stmpconf.rx_filter) {
13513 case HWTSTAMP_FILTER_NONE:
13516 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13517 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13518 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13520 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13521 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13522 TG3_RX_PTP_CTL_SYNC_EVNT;
13524 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13525 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13526 TG3_RX_PTP_CTL_DELAY_REQ;
13528 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13529 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13530 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13532 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13533 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13534 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13536 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13537 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13538 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13540 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13541 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13542 TG3_RX_PTP_CTL_SYNC_EVNT;
13544 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13545 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13546 TG3_RX_PTP_CTL_SYNC_EVNT;
13548 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13549 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13550 TG3_RX_PTP_CTL_SYNC_EVNT;
13552 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13553 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13554 TG3_RX_PTP_CTL_DELAY_REQ;
13556 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13557 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13558 TG3_RX_PTP_CTL_DELAY_REQ;
13560 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13561 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13562 TG3_RX_PTP_CTL_DELAY_REQ;
13568 if (netif_running(dev) && tp->rxptpctl)
13569 tw32(TG3_RX_PTP_CTL,
13570 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13572 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13576 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13578 struct mii_ioctl_data *data = if_mii(ifr);
13579 struct tg3 *tp = netdev_priv(dev);
13582 if (tg3_flag(tp, USE_PHYLIB)) {
13583 struct phy_device *phydev;
13584 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13586 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13587 return phy_mii_ioctl(phydev, ifr, cmd);
13592 data->phy_id = tp->phy_addr;
13595 case SIOCGMIIREG: {
13598 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13599 break; /* We have no PHY */
13601 if (!netif_running(dev))
13604 spin_lock_bh(&tp->lock);
13605 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13606 data->reg_num & 0x1f, &mii_regval);
13607 spin_unlock_bh(&tp->lock);
13609 data->val_out = mii_regval;
13615 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13616 break; /* We have no PHY */
13618 if (!netif_running(dev))
13621 spin_lock_bh(&tp->lock);
13622 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13623 data->reg_num & 0x1f, data->val_in);
13624 spin_unlock_bh(&tp->lock);
13628 case SIOCSHWTSTAMP:
13629 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13635 return -EOPNOTSUPP;
13638 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13640 struct tg3 *tp = netdev_priv(dev);
13642 memcpy(ec, &tp->coal, sizeof(*ec));
13646 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13648 struct tg3 *tp = netdev_priv(dev);
13649 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13650 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13652 if (!tg3_flag(tp, 5705_PLUS)) {
13653 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13654 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13655 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13656 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13659 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13660 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13661 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13662 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13663 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13664 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13665 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13666 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13667 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13668 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13671 /* No rx interrupts will be generated if both are zero */
13672 if ((ec->rx_coalesce_usecs == 0) &&
13673 (ec->rx_max_coalesced_frames == 0))
13676 /* No tx interrupts will be generated if both are zero */
13677 if ((ec->tx_coalesce_usecs == 0) &&
13678 (ec->tx_max_coalesced_frames == 0))
13681 /* Only copy relevant parameters, ignore all others. */
13682 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13683 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13684 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13685 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13686 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13687 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13688 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13689 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13690 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13692 if (netif_running(dev)) {
13693 tg3_full_lock(tp, 0);
13694 __tg3_set_coalesce(tp, &tp->coal);
13695 tg3_full_unlock(tp);
13700 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13702 struct tg3 *tp = netdev_priv(dev);
13704 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13705 netdev_warn(tp->dev, "Board does not support EEE!\n");
13706 return -EOPNOTSUPP;
13709 if (edata->advertised != tp->eee.advertised) {
13710 netdev_warn(tp->dev,
13711 "Direct manipulation of EEE advertisement is not supported\n");
13715 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13716 netdev_warn(tp->dev,
13717 "Maximal Tx Lpi timer supported is %#x(u)\n",
13718 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13724 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13725 tg3_warn_mgmt_link_flap(tp);
13727 if (netif_running(tp->dev)) {
13728 tg3_full_lock(tp, 0);
13731 tg3_full_unlock(tp);
13737 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13739 struct tg3 *tp = netdev_priv(dev);
13741 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13742 netdev_warn(tp->dev,
13743 "Board does not support EEE!\n");
13744 return -EOPNOTSUPP;
13751 static const struct ethtool_ops tg3_ethtool_ops = {
13752 .get_settings = tg3_get_settings,
13753 .set_settings = tg3_set_settings,
13754 .get_drvinfo = tg3_get_drvinfo,
13755 .get_regs_len = tg3_get_regs_len,
13756 .get_regs = tg3_get_regs,
13757 .get_wol = tg3_get_wol,
13758 .set_wol = tg3_set_wol,
13759 .get_msglevel = tg3_get_msglevel,
13760 .set_msglevel = tg3_set_msglevel,
13761 .nway_reset = tg3_nway_reset,
13762 .get_link = ethtool_op_get_link,
13763 .get_eeprom_len = tg3_get_eeprom_len,
13764 .get_eeprom = tg3_get_eeprom,
13765 .set_eeprom = tg3_set_eeprom,
13766 .get_ringparam = tg3_get_ringparam,
13767 .set_ringparam = tg3_set_ringparam,
13768 .get_pauseparam = tg3_get_pauseparam,
13769 .set_pauseparam = tg3_set_pauseparam,
13770 .self_test = tg3_self_test,
13771 .get_strings = tg3_get_strings,
13772 .set_phys_id = tg3_set_phys_id,
13773 .get_ethtool_stats = tg3_get_ethtool_stats,
13774 .get_coalesce = tg3_get_coalesce,
13775 .set_coalesce = tg3_set_coalesce,
13776 .get_sset_count = tg3_get_sset_count,
13777 .get_rxnfc = tg3_get_rxnfc,
13778 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13779 .get_rxfh_indir = tg3_get_rxfh_indir,
13780 .set_rxfh_indir = tg3_set_rxfh_indir,
13781 .get_channels = tg3_get_channels,
13782 .set_channels = tg3_set_channels,
13783 .get_ts_info = tg3_get_ts_info,
13784 .get_eee = tg3_get_eee,
13785 .set_eee = tg3_set_eee,
13788 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13789 struct rtnl_link_stats64 *stats)
13791 struct tg3 *tp = netdev_priv(dev);
13793 spin_lock_bh(&tp->lock);
13794 if (!tp->hw_stats) {
13795 spin_unlock_bh(&tp->lock);
13796 return &tp->net_stats_prev;
13799 tg3_get_nstats(tp, stats);
13800 spin_unlock_bh(&tp->lock);
13805 static void tg3_set_rx_mode(struct net_device *dev)
13807 struct tg3 *tp = netdev_priv(dev);
13809 if (!netif_running(dev))
13812 tg3_full_lock(tp, 0);
13813 __tg3_set_rx_mode(dev);
13814 tg3_full_unlock(tp);
13817 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13820 dev->mtu = new_mtu;
13822 if (new_mtu > ETH_DATA_LEN) {
13823 if (tg3_flag(tp, 5780_CLASS)) {
13824 netdev_update_features(dev);
13825 tg3_flag_clear(tp, TSO_CAPABLE);
13827 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13830 if (tg3_flag(tp, 5780_CLASS)) {
13831 tg3_flag_set(tp, TSO_CAPABLE);
13832 netdev_update_features(dev);
13834 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13838 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13840 struct tg3 *tp = netdev_priv(dev);
13842 bool reset_phy = false;
13844 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13847 if (!netif_running(dev)) {
13848 /* We'll just catch it later when the
13851 tg3_set_mtu(dev, tp, new_mtu);
13857 tg3_netif_stop(tp);
13859 tg3_full_lock(tp, 1);
13861 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13863 tg3_set_mtu(dev, tp, new_mtu);
13865 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13866 * breaks all requests to 256 bytes.
13868 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13871 err = tg3_restart_hw(tp, reset_phy);
13874 tg3_netif_start(tp);
13876 tg3_full_unlock(tp);
13884 static const struct net_device_ops tg3_netdev_ops = {
13885 .ndo_open = tg3_open,
13886 .ndo_stop = tg3_close,
13887 .ndo_start_xmit = tg3_start_xmit,
13888 .ndo_get_stats64 = tg3_get_stats64,
13889 .ndo_validate_addr = eth_validate_addr,
13890 .ndo_set_rx_mode = tg3_set_rx_mode,
13891 .ndo_set_mac_address = tg3_set_mac_addr,
13892 .ndo_do_ioctl = tg3_ioctl,
13893 .ndo_tx_timeout = tg3_tx_timeout,
13894 .ndo_change_mtu = tg3_change_mtu,
13895 .ndo_fix_features = tg3_fix_features,
13896 .ndo_set_features = tg3_set_features,
13897 #ifdef CONFIG_NET_POLL_CONTROLLER
13898 .ndo_poll_controller = tg3_poll_controller,
13902 static void tg3_get_eeprom_size(struct tg3 *tp)
13904 u32 cursize, val, magic;
13906 tp->nvram_size = EEPROM_CHIP_SIZE;
13908 if (tg3_nvram_read(tp, 0, &magic) != 0)
13911 if ((magic != TG3_EEPROM_MAGIC) &&
13912 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13913 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13917 * Size the chip by reading offsets at increasing powers of two.
13918 * When we encounter our validation signature, we know the addressing
13919 * has wrapped around, and thus have our chip size.
13923 while (cursize < tp->nvram_size) {
13924 if (tg3_nvram_read(tp, cursize, &val) != 0)
13933 tp->nvram_size = cursize;
13936 static void tg3_get_nvram_size(struct tg3 *tp)
13940 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13943 /* Selfboot format */
13944 if (val != TG3_EEPROM_MAGIC) {
13945 tg3_get_eeprom_size(tp);
13949 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13951 /* This is confusing. We want to operate on the
13952 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13953 * call will read from NVRAM and byteswap the data
13954 * according to the byteswapping settings for all
13955 * other register accesses. This ensures the data we
13956 * want will always reside in the lower 16-bits.
13957 * However, the data in NVRAM is in LE format, which
13958 * means the data from the NVRAM read will always be
13959 * opposite the endianness of the CPU. The 16-bit
13960 * byteswap then brings the data to CPU endianness.
13962 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13966 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13969 static void tg3_get_nvram_info(struct tg3 *tp)
13973 nvcfg1 = tr32(NVRAM_CFG1);
13974 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13975 tg3_flag_set(tp, FLASH);
13977 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13978 tw32(NVRAM_CFG1, nvcfg1);
13981 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13982 tg3_flag(tp, 5780_CLASS)) {
13983 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13984 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13985 tp->nvram_jedecnum = JEDEC_ATMEL;
13986 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13987 tg3_flag_set(tp, NVRAM_BUFFERED);
13989 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13990 tp->nvram_jedecnum = JEDEC_ATMEL;
13991 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13993 case FLASH_VENDOR_ATMEL_EEPROM:
13994 tp->nvram_jedecnum = JEDEC_ATMEL;
13995 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13996 tg3_flag_set(tp, NVRAM_BUFFERED);
13998 case FLASH_VENDOR_ST:
13999 tp->nvram_jedecnum = JEDEC_ST;
14000 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14001 tg3_flag_set(tp, NVRAM_BUFFERED);
14003 case FLASH_VENDOR_SAIFUN:
14004 tp->nvram_jedecnum = JEDEC_SAIFUN;
14005 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14007 case FLASH_VENDOR_SST_SMALL:
14008 case FLASH_VENDOR_SST_LARGE:
14009 tp->nvram_jedecnum = JEDEC_SST;
14010 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14014 tp->nvram_jedecnum = JEDEC_ATMEL;
14015 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14016 tg3_flag_set(tp, NVRAM_BUFFERED);
14020 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14022 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14023 case FLASH_5752PAGE_SIZE_256:
14024 tp->nvram_pagesize = 256;
14026 case FLASH_5752PAGE_SIZE_512:
14027 tp->nvram_pagesize = 512;
14029 case FLASH_5752PAGE_SIZE_1K:
14030 tp->nvram_pagesize = 1024;
14032 case FLASH_5752PAGE_SIZE_2K:
14033 tp->nvram_pagesize = 2048;
14035 case FLASH_5752PAGE_SIZE_4K:
14036 tp->nvram_pagesize = 4096;
14038 case FLASH_5752PAGE_SIZE_264:
14039 tp->nvram_pagesize = 264;
14041 case FLASH_5752PAGE_SIZE_528:
14042 tp->nvram_pagesize = 528;
14047 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14051 nvcfg1 = tr32(NVRAM_CFG1);
14053 /* NVRAM protection for TPM */
14054 if (nvcfg1 & (1 << 27))
14055 tg3_flag_set(tp, PROTECTED_NVRAM);
14057 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14058 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14059 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14060 tp->nvram_jedecnum = JEDEC_ATMEL;
14061 tg3_flag_set(tp, NVRAM_BUFFERED);
14063 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14064 tp->nvram_jedecnum = JEDEC_ATMEL;
14065 tg3_flag_set(tp, NVRAM_BUFFERED);
14066 tg3_flag_set(tp, FLASH);
14068 case FLASH_5752VENDOR_ST_M45PE10:
14069 case FLASH_5752VENDOR_ST_M45PE20:
14070 case FLASH_5752VENDOR_ST_M45PE40:
14071 tp->nvram_jedecnum = JEDEC_ST;
14072 tg3_flag_set(tp, NVRAM_BUFFERED);
14073 tg3_flag_set(tp, FLASH);
14077 if (tg3_flag(tp, FLASH)) {
14078 tg3_nvram_get_pagesize(tp, nvcfg1);
14080 /* For eeprom, set pagesize to maximum eeprom size */
14081 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14083 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14084 tw32(NVRAM_CFG1, nvcfg1);
14088 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14090 u32 nvcfg1, protect = 0;
14092 nvcfg1 = tr32(NVRAM_CFG1);
14094 /* NVRAM protection for TPM */
14095 if (nvcfg1 & (1 << 27)) {
14096 tg3_flag_set(tp, PROTECTED_NVRAM);
14100 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14102 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14103 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14104 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14105 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14106 tp->nvram_jedecnum = JEDEC_ATMEL;
14107 tg3_flag_set(tp, NVRAM_BUFFERED);
14108 tg3_flag_set(tp, FLASH);
14109 tp->nvram_pagesize = 264;
14110 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14111 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14112 tp->nvram_size = (protect ? 0x3e200 :
14113 TG3_NVRAM_SIZE_512KB);
14114 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14115 tp->nvram_size = (protect ? 0x1f200 :
14116 TG3_NVRAM_SIZE_256KB);
14118 tp->nvram_size = (protect ? 0x1f200 :
14119 TG3_NVRAM_SIZE_128KB);
14121 case FLASH_5752VENDOR_ST_M45PE10:
14122 case FLASH_5752VENDOR_ST_M45PE20:
14123 case FLASH_5752VENDOR_ST_M45PE40:
14124 tp->nvram_jedecnum = JEDEC_ST;
14125 tg3_flag_set(tp, NVRAM_BUFFERED);
14126 tg3_flag_set(tp, FLASH);
14127 tp->nvram_pagesize = 256;
14128 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14129 tp->nvram_size = (protect ?
14130 TG3_NVRAM_SIZE_64KB :
14131 TG3_NVRAM_SIZE_128KB);
14132 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14133 tp->nvram_size = (protect ?
14134 TG3_NVRAM_SIZE_64KB :
14135 TG3_NVRAM_SIZE_256KB);
14137 tp->nvram_size = (protect ?
14138 TG3_NVRAM_SIZE_128KB :
14139 TG3_NVRAM_SIZE_512KB);
14144 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14148 nvcfg1 = tr32(NVRAM_CFG1);
14150 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14151 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14152 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14153 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14154 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14155 tp->nvram_jedecnum = JEDEC_ATMEL;
14156 tg3_flag_set(tp, NVRAM_BUFFERED);
14157 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14159 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14160 tw32(NVRAM_CFG1, nvcfg1);
14162 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14163 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14164 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14165 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14166 tp->nvram_jedecnum = JEDEC_ATMEL;
14167 tg3_flag_set(tp, NVRAM_BUFFERED);
14168 tg3_flag_set(tp, FLASH);
14169 tp->nvram_pagesize = 264;
14171 case FLASH_5752VENDOR_ST_M45PE10:
14172 case FLASH_5752VENDOR_ST_M45PE20:
14173 case FLASH_5752VENDOR_ST_M45PE40:
14174 tp->nvram_jedecnum = JEDEC_ST;
14175 tg3_flag_set(tp, NVRAM_BUFFERED);
14176 tg3_flag_set(tp, FLASH);
14177 tp->nvram_pagesize = 256;
14182 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14184 u32 nvcfg1, protect = 0;
14186 nvcfg1 = tr32(NVRAM_CFG1);
14188 /* NVRAM protection for TPM */
14189 if (nvcfg1 & (1 << 27)) {
14190 tg3_flag_set(tp, PROTECTED_NVRAM);
14194 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14196 case FLASH_5761VENDOR_ATMEL_ADB021D:
14197 case FLASH_5761VENDOR_ATMEL_ADB041D:
14198 case FLASH_5761VENDOR_ATMEL_ADB081D:
14199 case FLASH_5761VENDOR_ATMEL_ADB161D:
14200 case FLASH_5761VENDOR_ATMEL_MDB021D:
14201 case FLASH_5761VENDOR_ATMEL_MDB041D:
14202 case FLASH_5761VENDOR_ATMEL_MDB081D:
14203 case FLASH_5761VENDOR_ATMEL_MDB161D:
14204 tp->nvram_jedecnum = JEDEC_ATMEL;
14205 tg3_flag_set(tp, NVRAM_BUFFERED);
14206 tg3_flag_set(tp, FLASH);
14207 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14208 tp->nvram_pagesize = 256;
14210 case FLASH_5761VENDOR_ST_A_M45PE20:
14211 case FLASH_5761VENDOR_ST_A_M45PE40:
14212 case FLASH_5761VENDOR_ST_A_M45PE80:
14213 case FLASH_5761VENDOR_ST_A_M45PE16:
14214 case FLASH_5761VENDOR_ST_M_M45PE20:
14215 case FLASH_5761VENDOR_ST_M_M45PE40:
14216 case FLASH_5761VENDOR_ST_M_M45PE80:
14217 case FLASH_5761VENDOR_ST_M_M45PE16:
14218 tp->nvram_jedecnum = JEDEC_ST;
14219 tg3_flag_set(tp, NVRAM_BUFFERED);
14220 tg3_flag_set(tp, FLASH);
14221 tp->nvram_pagesize = 256;
14226 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14229 case FLASH_5761VENDOR_ATMEL_ADB161D:
14230 case FLASH_5761VENDOR_ATMEL_MDB161D:
14231 case FLASH_5761VENDOR_ST_A_M45PE16:
14232 case FLASH_5761VENDOR_ST_M_M45PE16:
14233 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14235 case FLASH_5761VENDOR_ATMEL_ADB081D:
14236 case FLASH_5761VENDOR_ATMEL_MDB081D:
14237 case FLASH_5761VENDOR_ST_A_M45PE80:
14238 case FLASH_5761VENDOR_ST_M_M45PE80:
14239 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14241 case FLASH_5761VENDOR_ATMEL_ADB041D:
14242 case FLASH_5761VENDOR_ATMEL_MDB041D:
14243 case FLASH_5761VENDOR_ST_A_M45PE40:
14244 case FLASH_5761VENDOR_ST_M_M45PE40:
14245 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14247 case FLASH_5761VENDOR_ATMEL_ADB021D:
14248 case FLASH_5761VENDOR_ATMEL_MDB021D:
14249 case FLASH_5761VENDOR_ST_A_M45PE20:
14250 case FLASH_5761VENDOR_ST_M_M45PE20:
14251 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14257 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14259 tp->nvram_jedecnum = JEDEC_ATMEL;
14260 tg3_flag_set(tp, NVRAM_BUFFERED);
14261 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14264 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14268 nvcfg1 = tr32(NVRAM_CFG1);
14270 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14271 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14272 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14273 tp->nvram_jedecnum = JEDEC_ATMEL;
14274 tg3_flag_set(tp, NVRAM_BUFFERED);
14275 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14277 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14278 tw32(NVRAM_CFG1, nvcfg1);
14280 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14281 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14282 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14283 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14284 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14285 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14286 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14287 tp->nvram_jedecnum = JEDEC_ATMEL;
14288 tg3_flag_set(tp, NVRAM_BUFFERED);
14289 tg3_flag_set(tp, FLASH);
14291 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14292 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14293 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14294 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14295 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14297 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14298 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14299 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14301 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14302 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14303 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14307 case FLASH_5752VENDOR_ST_M45PE10:
14308 case FLASH_5752VENDOR_ST_M45PE20:
14309 case FLASH_5752VENDOR_ST_M45PE40:
14310 tp->nvram_jedecnum = JEDEC_ST;
14311 tg3_flag_set(tp, NVRAM_BUFFERED);
14312 tg3_flag_set(tp, FLASH);
14314 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14315 case FLASH_5752VENDOR_ST_M45PE10:
14316 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14318 case FLASH_5752VENDOR_ST_M45PE20:
14319 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14321 case FLASH_5752VENDOR_ST_M45PE40:
14322 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14327 tg3_flag_set(tp, NO_NVRAM);
14331 tg3_nvram_get_pagesize(tp, nvcfg1);
14332 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14333 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14337 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14341 nvcfg1 = tr32(NVRAM_CFG1);
14343 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14344 case FLASH_5717VENDOR_ATMEL_EEPROM:
14345 case FLASH_5717VENDOR_MICRO_EEPROM:
14346 tp->nvram_jedecnum = JEDEC_ATMEL;
14347 tg3_flag_set(tp, NVRAM_BUFFERED);
14348 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14350 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14351 tw32(NVRAM_CFG1, nvcfg1);
14353 case FLASH_5717VENDOR_ATMEL_MDB011D:
14354 case FLASH_5717VENDOR_ATMEL_ADB011B:
14355 case FLASH_5717VENDOR_ATMEL_ADB011D:
14356 case FLASH_5717VENDOR_ATMEL_MDB021D:
14357 case FLASH_5717VENDOR_ATMEL_ADB021B:
14358 case FLASH_5717VENDOR_ATMEL_ADB021D:
14359 case FLASH_5717VENDOR_ATMEL_45USPT:
14360 tp->nvram_jedecnum = JEDEC_ATMEL;
14361 tg3_flag_set(tp, NVRAM_BUFFERED);
14362 tg3_flag_set(tp, FLASH);
14364 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14365 case FLASH_5717VENDOR_ATMEL_MDB021D:
14366 /* Detect size with tg3_nvram_get_size() */
14368 case FLASH_5717VENDOR_ATMEL_ADB021B:
14369 case FLASH_5717VENDOR_ATMEL_ADB021D:
14370 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14373 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14377 case FLASH_5717VENDOR_ST_M_M25PE10:
14378 case FLASH_5717VENDOR_ST_A_M25PE10:
14379 case FLASH_5717VENDOR_ST_M_M45PE10:
14380 case FLASH_5717VENDOR_ST_A_M45PE10:
14381 case FLASH_5717VENDOR_ST_M_M25PE20:
14382 case FLASH_5717VENDOR_ST_A_M25PE20:
14383 case FLASH_5717VENDOR_ST_M_M45PE20:
14384 case FLASH_5717VENDOR_ST_A_M45PE20:
14385 case FLASH_5717VENDOR_ST_25USPT:
14386 case FLASH_5717VENDOR_ST_45USPT:
14387 tp->nvram_jedecnum = JEDEC_ST;
14388 tg3_flag_set(tp, NVRAM_BUFFERED);
14389 tg3_flag_set(tp, FLASH);
14391 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14392 case FLASH_5717VENDOR_ST_M_M25PE20:
14393 case FLASH_5717VENDOR_ST_M_M45PE20:
14394 /* Detect size with tg3_nvram_get_size() */
14396 case FLASH_5717VENDOR_ST_A_M25PE20:
14397 case FLASH_5717VENDOR_ST_A_M45PE20:
14398 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14401 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14406 tg3_flag_set(tp, NO_NVRAM);
14410 tg3_nvram_get_pagesize(tp, nvcfg1);
14411 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14412 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14415 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14417 u32 nvcfg1, nvmpinstrp;
14419 nvcfg1 = tr32(NVRAM_CFG1);
14420 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14422 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14423 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14424 tg3_flag_set(tp, NO_NVRAM);
14428 switch (nvmpinstrp) {
14429 case FLASH_5762_EEPROM_HD:
14430 nvmpinstrp = FLASH_5720_EEPROM_HD;
14432 case FLASH_5762_EEPROM_LD:
14433 nvmpinstrp = FLASH_5720_EEPROM_LD;
14435 case FLASH_5720VENDOR_M_ST_M45PE20:
14436 /* This pinstrap supports multiple sizes, so force it
14437 * to read the actual size from location 0xf0.
14439 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14444 switch (nvmpinstrp) {
14445 case FLASH_5720_EEPROM_HD:
14446 case FLASH_5720_EEPROM_LD:
14447 tp->nvram_jedecnum = JEDEC_ATMEL;
14448 tg3_flag_set(tp, NVRAM_BUFFERED);
14450 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14451 tw32(NVRAM_CFG1, nvcfg1);
14452 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14453 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14455 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14457 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14458 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14459 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14460 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14461 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14462 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14463 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14464 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14465 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14466 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14467 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14468 case FLASH_5720VENDOR_ATMEL_45USPT:
14469 tp->nvram_jedecnum = JEDEC_ATMEL;
14470 tg3_flag_set(tp, NVRAM_BUFFERED);
14471 tg3_flag_set(tp, FLASH);
14473 switch (nvmpinstrp) {
14474 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14475 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14476 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14477 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14479 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14480 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14481 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14482 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14484 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14485 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14486 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14489 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14490 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14494 case FLASH_5720VENDOR_M_ST_M25PE10:
14495 case FLASH_5720VENDOR_M_ST_M45PE10:
14496 case FLASH_5720VENDOR_A_ST_M25PE10:
14497 case FLASH_5720VENDOR_A_ST_M45PE10:
14498 case FLASH_5720VENDOR_M_ST_M25PE20:
14499 case FLASH_5720VENDOR_M_ST_M45PE20:
14500 case FLASH_5720VENDOR_A_ST_M25PE20:
14501 case FLASH_5720VENDOR_A_ST_M45PE20:
14502 case FLASH_5720VENDOR_M_ST_M25PE40:
14503 case FLASH_5720VENDOR_M_ST_M45PE40:
14504 case FLASH_5720VENDOR_A_ST_M25PE40:
14505 case FLASH_5720VENDOR_A_ST_M45PE40:
14506 case FLASH_5720VENDOR_M_ST_M25PE80:
14507 case FLASH_5720VENDOR_M_ST_M45PE80:
14508 case FLASH_5720VENDOR_A_ST_M25PE80:
14509 case FLASH_5720VENDOR_A_ST_M45PE80:
14510 case FLASH_5720VENDOR_ST_25USPT:
14511 case FLASH_5720VENDOR_ST_45USPT:
14512 tp->nvram_jedecnum = JEDEC_ST;
14513 tg3_flag_set(tp, NVRAM_BUFFERED);
14514 tg3_flag_set(tp, FLASH);
14516 switch (nvmpinstrp) {
14517 case FLASH_5720VENDOR_M_ST_M25PE20:
14518 case FLASH_5720VENDOR_M_ST_M45PE20:
14519 case FLASH_5720VENDOR_A_ST_M25PE20:
14520 case FLASH_5720VENDOR_A_ST_M45PE20:
14521 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14523 case FLASH_5720VENDOR_M_ST_M25PE40:
14524 case FLASH_5720VENDOR_M_ST_M45PE40:
14525 case FLASH_5720VENDOR_A_ST_M25PE40:
14526 case FLASH_5720VENDOR_A_ST_M45PE40:
14527 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14529 case FLASH_5720VENDOR_M_ST_M25PE80:
14530 case FLASH_5720VENDOR_M_ST_M45PE80:
14531 case FLASH_5720VENDOR_A_ST_M25PE80:
14532 case FLASH_5720VENDOR_A_ST_M45PE80:
14533 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14536 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14537 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14542 tg3_flag_set(tp, NO_NVRAM);
14546 tg3_nvram_get_pagesize(tp, nvcfg1);
14547 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14548 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14550 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14553 if (tg3_nvram_read(tp, 0, &val))
14556 if (val != TG3_EEPROM_MAGIC &&
14557 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14558 tg3_flag_set(tp, NO_NVRAM);
14562 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14563 static void tg3_nvram_init(struct tg3 *tp)
14565 if (tg3_flag(tp, IS_SSB_CORE)) {
14566 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14567 tg3_flag_clear(tp, NVRAM);
14568 tg3_flag_clear(tp, NVRAM_BUFFERED);
14569 tg3_flag_set(tp, NO_NVRAM);
14573 tw32_f(GRC_EEPROM_ADDR,
14574 (EEPROM_ADDR_FSM_RESET |
14575 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14576 EEPROM_ADDR_CLKPERD_SHIFT)));
14580 /* Enable seeprom accesses. */
14581 tw32_f(GRC_LOCAL_CTRL,
14582 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14585 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14586 tg3_asic_rev(tp) != ASIC_REV_5701) {
14587 tg3_flag_set(tp, NVRAM);
14589 if (tg3_nvram_lock(tp)) {
14590 netdev_warn(tp->dev,
14591 "Cannot get nvram lock, %s failed\n",
14595 tg3_enable_nvram_access(tp);
14597 tp->nvram_size = 0;
14599 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14600 tg3_get_5752_nvram_info(tp);
14601 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14602 tg3_get_5755_nvram_info(tp);
14603 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14604 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14605 tg3_asic_rev(tp) == ASIC_REV_5785)
14606 tg3_get_5787_nvram_info(tp);
14607 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14608 tg3_get_5761_nvram_info(tp);
14609 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14610 tg3_get_5906_nvram_info(tp);
14611 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14612 tg3_flag(tp, 57765_CLASS))
14613 tg3_get_57780_nvram_info(tp);
14614 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14615 tg3_asic_rev(tp) == ASIC_REV_5719)
14616 tg3_get_5717_nvram_info(tp);
14617 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14618 tg3_asic_rev(tp) == ASIC_REV_5762)
14619 tg3_get_5720_nvram_info(tp);
14621 tg3_get_nvram_info(tp);
14623 if (tp->nvram_size == 0)
14624 tg3_get_nvram_size(tp);
14626 tg3_disable_nvram_access(tp);
14627 tg3_nvram_unlock(tp);
14630 tg3_flag_clear(tp, NVRAM);
14631 tg3_flag_clear(tp, NVRAM_BUFFERED);
14633 tg3_get_eeprom_size(tp);
14637 struct subsys_tbl_ent {
14638 u16 subsys_vendor, subsys_devid;
14642 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14643 /* Broadcom boards. */
14644 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14645 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14646 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14647 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14648 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14649 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14650 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14651 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14652 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14653 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14654 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14655 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14656 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14657 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14658 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14659 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14660 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14661 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14662 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14663 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14664 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14665 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14668 { TG3PCI_SUBVENDOR_ID_3COM,
14669 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14670 { TG3PCI_SUBVENDOR_ID_3COM,
14671 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14672 { TG3PCI_SUBVENDOR_ID_3COM,
14673 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14674 { TG3PCI_SUBVENDOR_ID_3COM,
14675 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14676 { TG3PCI_SUBVENDOR_ID_3COM,
14677 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14680 { TG3PCI_SUBVENDOR_ID_DELL,
14681 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14682 { TG3PCI_SUBVENDOR_ID_DELL,
14683 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14684 { TG3PCI_SUBVENDOR_ID_DELL,
14685 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14686 { TG3PCI_SUBVENDOR_ID_DELL,
14687 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14689 /* Compaq boards. */
14690 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14691 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14692 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14693 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14694 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14695 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14696 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14697 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14698 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14699 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14702 { TG3PCI_SUBVENDOR_ID_IBM,
14703 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14706 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14710 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14711 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14712 tp->pdev->subsystem_vendor) &&
14713 (subsys_id_to_phy_id[i].subsys_devid ==
14714 tp->pdev->subsystem_device))
14715 return &subsys_id_to_phy_id[i];
14720 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14724 tp->phy_id = TG3_PHY_ID_INVALID;
14725 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14727 /* Assume an onboard device and WOL capable by default. */
14728 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14729 tg3_flag_set(tp, WOL_CAP);
14731 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14732 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14733 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14734 tg3_flag_set(tp, IS_NIC);
14736 val = tr32(VCPU_CFGSHDW);
14737 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14738 tg3_flag_set(tp, ASPM_WORKAROUND);
14739 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14740 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14741 tg3_flag_set(tp, WOL_ENABLE);
14742 device_set_wakeup_enable(&tp->pdev->dev, true);
14747 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14748 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14749 u32 nic_cfg, led_cfg;
14750 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14751 int eeprom_phy_serdes = 0;
14753 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14754 tp->nic_sram_data_cfg = nic_cfg;
14756 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14757 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14758 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14759 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14760 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14761 (ver > 0) && (ver < 0x100))
14762 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14764 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14765 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14767 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14768 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14769 eeprom_phy_serdes = 1;
14771 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14772 if (nic_phy_id != 0) {
14773 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14774 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14776 eeprom_phy_id = (id1 >> 16) << 10;
14777 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14778 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14782 tp->phy_id = eeprom_phy_id;
14783 if (eeprom_phy_serdes) {
14784 if (!tg3_flag(tp, 5705_PLUS))
14785 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14787 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14790 if (tg3_flag(tp, 5750_PLUS))
14791 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14792 SHASTA_EXT_LED_MODE_MASK);
14794 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14798 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14799 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14802 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14803 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14806 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14807 tp->led_ctrl = LED_CTRL_MODE_MAC;
14809 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14810 * read on some older 5700/5701 bootcode.
14812 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14813 tg3_asic_rev(tp) == ASIC_REV_5701)
14814 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14818 case SHASTA_EXT_LED_SHARED:
14819 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14820 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14821 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14822 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14823 LED_CTRL_MODE_PHY_2);
14826 case SHASTA_EXT_LED_MAC:
14827 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14830 case SHASTA_EXT_LED_COMBO:
14831 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14832 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14833 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14834 LED_CTRL_MODE_PHY_2);
14839 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14840 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14841 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14842 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14844 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14845 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14847 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14848 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14849 if ((tp->pdev->subsystem_vendor ==
14850 PCI_VENDOR_ID_ARIMA) &&
14851 (tp->pdev->subsystem_device == 0x205a ||
14852 tp->pdev->subsystem_device == 0x2063))
14853 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14855 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14856 tg3_flag_set(tp, IS_NIC);
14859 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14860 tg3_flag_set(tp, ENABLE_ASF);
14861 if (tg3_flag(tp, 5750_PLUS))
14862 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14865 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14866 tg3_flag(tp, 5750_PLUS))
14867 tg3_flag_set(tp, ENABLE_APE);
14869 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14870 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14871 tg3_flag_clear(tp, WOL_CAP);
14873 if (tg3_flag(tp, WOL_CAP) &&
14874 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14875 tg3_flag_set(tp, WOL_ENABLE);
14876 device_set_wakeup_enable(&tp->pdev->dev, true);
14879 if (cfg2 & (1 << 17))
14880 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14882 /* serdes signal pre-emphasis in register 0x590 set by */
14883 /* bootcode if bit 18 is set */
14884 if (cfg2 & (1 << 18))
14885 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14887 if ((tg3_flag(tp, 57765_PLUS) ||
14888 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14889 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14890 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14891 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14893 if (tg3_flag(tp, PCI_EXPRESS)) {
14896 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14897 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14898 !tg3_flag(tp, 57765_PLUS) &&
14899 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14900 tg3_flag_set(tp, ASPM_WORKAROUND);
14901 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14902 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14903 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14904 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14907 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14908 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14909 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14910 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14911 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14912 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14915 if (tg3_flag(tp, WOL_CAP))
14916 device_set_wakeup_enable(&tp->pdev->dev,
14917 tg3_flag(tp, WOL_ENABLE));
14919 device_set_wakeup_capable(&tp->pdev->dev, false);
14922 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14925 u32 val2, off = offset * 8;
14927 err = tg3_nvram_lock(tp);
14931 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14932 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14933 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14934 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14937 for (i = 0; i < 100; i++) {
14938 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14939 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14940 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14946 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14948 tg3_nvram_unlock(tp);
14949 if (val2 & APE_OTP_STATUS_CMD_DONE)
14955 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14960 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14961 tw32(OTP_CTRL, cmd);
14963 /* Wait for up to 1 ms for command to execute. */
14964 for (i = 0; i < 100; i++) {
14965 val = tr32(OTP_STATUS);
14966 if (val & OTP_STATUS_CMD_DONE)
14971 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14974 /* Read the gphy configuration from the OTP region of the chip. The gphy
14975 * configuration is a 32-bit value that straddles the alignment boundary.
14976 * We do two 32-bit reads and then shift and merge the results.
14978 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14980 u32 bhalf_otp, thalf_otp;
14982 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14984 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14987 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14989 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14992 thalf_otp = tr32(OTP_READ_DATA);
14994 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14996 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14999 bhalf_otp = tr32(OTP_READ_DATA);
15001 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15004 static void tg3_phy_init_link_config(struct tg3 *tp)
15006 u32 adv = ADVERTISED_Autoneg;
15008 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15009 adv |= ADVERTISED_1000baseT_Half |
15010 ADVERTISED_1000baseT_Full;
15012 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15013 adv |= ADVERTISED_100baseT_Half |
15014 ADVERTISED_100baseT_Full |
15015 ADVERTISED_10baseT_Half |
15016 ADVERTISED_10baseT_Full |
15019 adv |= ADVERTISED_FIBRE;
15021 tp->link_config.advertising = adv;
15022 tp->link_config.speed = SPEED_UNKNOWN;
15023 tp->link_config.duplex = DUPLEX_UNKNOWN;
15024 tp->link_config.autoneg = AUTONEG_ENABLE;
15025 tp->link_config.active_speed = SPEED_UNKNOWN;
15026 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15031 static int tg3_phy_probe(struct tg3 *tp)
15033 u32 hw_phy_id_1, hw_phy_id_2;
15034 u32 hw_phy_id, hw_phy_id_masked;
15037 /* flow control autonegotiation is default behavior */
15038 tg3_flag_set(tp, PAUSE_AUTONEG);
15039 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15041 if (tg3_flag(tp, ENABLE_APE)) {
15042 switch (tp->pci_fn) {
15044 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15047 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15050 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15053 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15058 if (!tg3_flag(tp, ENABLE_ASF) &&
15059 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15060 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15061 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15062 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15064 if (tg3_flag(tp, USE_PHYLIB))
15065 return tg3_phy_init(tp);
15067 /* Reading the PHY ID register can conflict with ASF
15068 * firmware access to the PHY hardware.
15071 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15072 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15074 /* Now read the physical PHY_ID from the chip and verify
15075 * that it is sane. If it doesn't look good, we fall back
15076 * to either the hard-coded table based PHY_ID and failing
15077 * that the value found in the eeprom area.
15079 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15080 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15082 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15083 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15084 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15086 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15089 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15090 tp->phy_id = hw_phy_id;
15091 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15092 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15094 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15096 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15097 /* Do nothing, phy ID already set up in
15098 * tg3_get_eeprom_hw_cfg().
15101 struct subsys_tbl_ent *p;
15103 /* No eeprom signature? Try the hardcoded
15104 * subsys device table.
15106 p = tg3_lookup_by_subsys(tp);
15108 tp->phy_id = p->phy_id;
15109 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15110 /* For now we saw the IDs 0xbc050cd0,
15111 * 0xbc050f80 and 0xbc050c30 on devices
15112 * connected to an BCM4785 and there are
15113 * probably more. Just assume that the phy is
15114 * supported when it is connected to a SSB core
15121 tp->phy_id == TG3_PHY_ID_BCM8002)
15122 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15126 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15127 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15128 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15129 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15130 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15131 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15132 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15133 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15134 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15135 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15137 tp->eee.supported = SUPPORTED_100baseT_Full |
15138 SUPPORTED_1000baseT_Full;
15139 tp->eee.advertised = ADVERTISED_100baseT_Full |
15140 ADVERTISED_1000baseT_Full;
15141 tp->eee.eee_enabled = 1;
15142 tp->eee.tx_lpi_enabled = 1;
15143 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15146 tg3_phy_init_link_config(tp);
15148 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15149 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15150 !tg3_flag(tp, ENABLE_APE) &&
15151 !tg3_flag(tp, ENABLE_ASF)) {
15154 tg3_readphy(tp, MII_BMSR, &bmsr);
15155 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15156 (bmsr & BMSR_LSTATUS))
15157 goto skip_phy_reset;
15159 err = tg3_phy_reset(tp);
15163 tg3_phy_set_wirespeed(tp);
15165 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15166 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15167 tp->link_config.flowctrl);
15169 tg3_writephy(tp, MII_BMCR,
15170 BMCR_ANENABLE | BMCR_ANRESTART);
15175 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15176 err = tg3_init_5401phy_dsp(tp);
15180 err = tg3_init_5401phy_dsp(tp);
15186 static void tg3_read_vpd(struct tg3 *tp)
15189 unsigned int block_end, rosize, len;
15193 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15197 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15199 goto out_not_found;
15201 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15202 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15203 i += PCI_VPD_LRDT_TAG_SIZE;
15205 if (block_end > vpdlen)
15206 goto out_not_found;
15208 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15209 PCI_VPD_RO_KEYWORD_MFR_ID);
15211 len = pci_vpd_info_field_size(&vpd_data[j]);
15213 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15214 if (j + len > block_end || len != 4 ||
15215 memcmp(&vpd_data[j], "1028", 4))
15218 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15219 PCI_VPD_RO_KEYWORD_VENDOR0);
15223 len = pci_vpd_info_field_size(&vpd_data[j]);
15225 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15226 if (j + len > block_end)
15229 if (len >= sizeof(tp->fw_ver))
15230 len = sizeof(tp->fw_ver) - 1;
15231 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15232 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15237 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15238 PCI_VPD_RO_KEYWORD_PARTNO);
15240 goto out_not_found;
15242 len = pci_vpd_info_field_size(&vpd_data[i]);
15244 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15245 if (len > TG3_BPN_SIZE ||
15246 (len + i) > vpdlen)
15247 goto out_not_found;
15249 memcpy(tp->board_part_number, &vpd_data[i], len);
15253 if (tp->board_part_number[0])
15257 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15258 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15259 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15260 strcpy(tp->board_part_number, "BCM5717");
15261 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15262 strcpy(tp->board_part_number, "BCM5718");
15265 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15266 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15267 strcpy(tp->board_part_number, "BCM57780");
15268 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15269 strcpy(tp->board_part_number, "BCM57760");
15270 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15271 strcpy(tp->board_part_number, "BCM57790");
15272 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15273 strcpy(tp->board_part_number, "BCM57788");
15276 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15277 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15278 strcpy(tp->board_part_number, "BCM57761");
15279 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15280 strcpy(tp->board_part_number, "BCM57765");
15281 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15282 strcpy(tp->board_part_number, "BCM57781");
15283 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15284 strcpy(tp->board_part_number, "BCM57785");
15285 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15286 strcpy(tp->board_part_number, "BCM57791");
15287 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15288 strcpy(tp->board_part_number, "BCM57795");
15291 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15292 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15293 strcpy(tp->board_part_number, "BCM57762");
15294 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15295 strcpy(tp->board_part_number, "BCM57766");
15296 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15297 strcpy(tp->board_part_number, "BCM57782");
15298 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15299 strcpy(tp->board_part_number, "BCM57786");
15302 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15303 strcpy(tp->board_part_number, "BCM95906");
15306 strcpy(tp->board_part_number, "none");
15310 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15314 if (tg3_nvram_read(tp, offset, &val) ||
15315 (val & 0xfc000000) != 0x0c000000 ||
15316 tg3_nvram_read(tp, offset + 4, &val) ||
15323 static void tg3_read_bc_ver(struct tg3 *tp)
15325 u32 val, offset, start, ver_offset;
15327 bool newver = false;
15329 if (tg3_nvram_read(tp, 0xc, &offset) ||
15330 tg3_nvram_read(tp, 0x4, &start))
15333 offset = tg3_nvram_logical_addr(tp, offset);
15335 if (tg3_nvram_read(tp, offset, &val))
15338 if ((val & 0xfc000000) == 0x0c000000) {
15339 if (tg3_nvram_read(tp, offset + 4, &val))
15346 dst_off = strlen(tp->fw_ver);
15349 if (TG3_VER_SIZE - dst_off < 16 ||
15350 tg3_nvram_read(tp, offset + 8, &ver_offset))
15353 offset = offset + ver_offset - start;
15354 for (i = 0; i < 16; i += 4) {
15356 if (tg3_nvram_read_be32(tp, offset + i, &v))
15359 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15364 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15367 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15368 TG3_NVM_BCVER_MAJSFT;
15369 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15370 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15371 "v%d.%02d", major, minor);
15375 static void tg3_read_hwsb_ver(struct tg3 *tp)
15377 u32 val, major, minor;
15379 /* Use native endian representation */
15380 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15383 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15384 TG3_NVM_HWSB_CFG1_MAJSFT;
15385 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15386 TG3_NVM_HWSB_CFG1_MINSFT;
15388 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15391 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15393 u32 offset, major, minor, build;
15395 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15397 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15400 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15401 case TG3_EEPROM_SB_REVISION_0:
15402 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15404 case TG3_EEPROM_SB_REVISION_2:
15405 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15407 case TG3_EEPROM_SB_REVISION_3:
15408 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15410 case TG3_EEPROM_SB_REVISION_4:
15411 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15413 case TG3_EEPROM_SB_REVISION_5:
15414 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15416 case TG3_EEPROM_SB_REVISION_6:
15417 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15423 if (tg3_nvram_read(tp, offset, &val))
15426 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15427 TG3_EEPROM_SB_EDH_BLD_SHFT;
15428 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15429 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15430 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15432 if (minor > 99 || build > 26)
15435 offset = strlen(tp->fw_ver);
15436 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15437 " v%d.%02d", major, minor);
15440 offset = strlen(tp->fw_ver);
15441 if (offset < TG3_VER_SIZE - 1)
15442 tp->fw_ver[offset] = 'a' + build - 1;
15446 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15448 u32 val, offset, start;
15451 for (offset = TG3_NVM_DIR_START;
15452 offset < TG3_NVM_DIR_END;
15453 offset += TG3_NVM_DIRENT_SIZE) {
15454 if (tg3_nvram_read(tp, offset, &val))
15457 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15461 if (offset == TG3_NVM_DIR_END)
15464 if (!tg3_flag(tp, 5705_PLUS))
15465 start = 0x08000000;
15466 else if (tg3_nvram_read(tp, offset - 4, &start))
15469 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15470 !tg3_fw_img_is_valid(tp, offset) ||
15471 tg3_nvram_read(tp, offset + 8, &val))
15474 offset += val - start;
15476 vlen = strlen(tp->fw_ver);
15478 tp->fw_ver[vlen++] = ',';
15479 tp->fw_ver[vlen++] = ' ';
15481 for (i = 0; i < 4; i++) {
15483 if (tg3_nvram_read_be32(tp, offset, &v))
15486 offset += sizeof(v);
15488 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15489 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15493 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15498 static void tg3_probe_ncsi(struct tg3 *tp)
15502 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15503 if (apedata != APE_SEG_SIG_MAGIC)
15506 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15507 if (!(apedata & APE_FW_STATUS_READY))
15510 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15511 tg3_flag_set(tp, APE_HAS_NCSI);
15514 static void tg3_read_dash_ver(struct tg3 *tp)
15520 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15522 if (tg3_flag(tp, APE_HAS_NCSI))
15524 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15529 vlen = strlen(tp->fw_ver);
15531 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15533 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15534 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15535 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15536 (apedata & APE_FW_VERSION_BLDMSK));
15539 static void tg3_read_otp_ver(struct tg3 *tp)
15543 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15546 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15547 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15548 TG3_OTP_MAGIC0_VALID(val)) {
15549 u64 val64 = (u64) val << 32 | val2;
15553 for (i = 0; i < 7; i++) {
15554 if ((val64 & 0xff) == 0)
15556 ver = val64 & 0xff;
15559 vlen = strlen(tp->fw_ver);
15560 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15564 static void tg3_read_fw_ver(struct tg3 *tp)
15567 bool vpd_vers = false;
15569 if (tp->fw_ver[0] != 0)
15572 if (tg3_flag(tp, NO_NVRAM)) {
15573 strcat(tp->fw_ver, "sb");
15574 tg3_read_otp_ver(tp);
15578 if (tg3_nvram_read(tp, 0, &val))
15581 if (val == TG3_EEPROM_MAGIC)
15582 tg3_read_bc_ver(tp);
15583 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15584 tg3_read_sb_ver(tp, val);
15585 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15586 tg3_read_hwsb_ver(tp);
15588 if (tg3_flag(tp, ENABLE_ASF)) {
15589 if (tg3_flag(tp, ENABLE_APE)) {
15590 tg3_probe_ncsi(tp);
15592 tg3_read_dash_ver(tp);
15593 } else if (!vpd_vers) {
15594 tg3_read_mgmtfw_ver(tp);
15598 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15601 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15603 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15604 return TG3_RX_RET_MAX_SIZE_5717;
15605 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15606 return TG3_RX_RET_MAX_SIZE_5700;
15608 return TG3_RX_RET_MAX_SIZE_5705;
15611 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15612 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15613 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15614 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15618 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15620 struct pci_dev *peer;
15621 unsigned int func, devnr = tp->pdev->devfn & ~7;
15623 for (func = 0; func < 8; func++) {
15624 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15625 if (peer && peer != tp->pdev)
15629 /* 5704 can be configured in single-port mode, set peer to
15630 * tp->pdev in that case.
15638 * We don't need to keep the refcount elevated; there's no way
15639 * to remove one half of this device without removing the other
15646 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15648 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15649 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15652 /* All devices that use the alternate
15653 * ASIC REV location have a CPMU.
15655 tg3_flag_set(tp, CPMU_PRESENT);
15657 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15658 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15659 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15660 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15661 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15662 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15663 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15664 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15665 reg = TG3PCI_GEN2_PRODID_ASICREV;
15666 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15667 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15668 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15669 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15670 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15671 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15672 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15673 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15674 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15675 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15676 reg = TG3PCI_GEN15_PRODID_ASICREV;
15678 reg = TG3PCI_PRODID_ASICREV;
15680 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15683 /* Wrong chip ID in 5752 A0. This code can be removed later
15684 * as A0 is not in production.
15686 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15687 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15689 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15690 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15692 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15693 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15694 tg3_asic_rev(tp) == ASIC_REV_5720)
15695 tg3_flag_set(tp, 5717_PLUS);
15697 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15698 tg3_asic_rev(tp) == ASIC_REV_57766)
15699 tg3_flag_set(tp, 57765_CLASS);
15701 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15702 tg3_asic_rev(tp) == ASIC_REV_5762)
15703 tg3_flag_set(tp, 57765_PLUS);
15705 /* Intentionally exclude ASIC_REV_5906 */
15706 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15707 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15708 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15709 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15710 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15711 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15712 tg3_flag(tp, 57765_PLUS))
15713 tg3_flag_set(tp, 5755_PLUS);
15715 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15716 tg3_asic_rev(tp) == ASIC_REV_5714)
15717 tg3_flag_set(tp, 5780_CLASS);
15719 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15720 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15721 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15722 tg3_flag(tp, 5755_PLUS) ||
15723 tg3_flag(tp, 5780_CLASS))
15724 tg3_flag_set(tp, 5750_PLUS);
15726 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15727 tg3_flag(tp, 5750_PLUS))
15728 tg3_flag_set(tp, 5705_PLUS);
15731 static bool tg3_10_100_only_device(struct tg3 *tp,
15732 const struct pci_device_id *ent)
15734 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15736 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15737 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15738 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15741 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15742 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15743 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15753 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15756 u32 pci_state_reg, grc_misc_cfg;
15761 /* Force memory write invalidate off. If we leave it on,
15762 * then on 5700_BX chips we have to enable a workaround.
15763 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15764 * to match the cacheline size. The Broadcom driver have this
15765 * workaround but turns MWI off all the times so never uses
15766 * it. This seems to suggest that the workaround is insufficient.
15768 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15769 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15770 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15772 /* Important! -- Make sure register accesses are byteswapped
15773 * correctly. Also, for those chips that require it, make
15774 * sure that indirect register accesses are enabled before
15775 * the first operation.
15777 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15779 tp->misc_host_ctrl |= (misc_ctrl_reg &
15780 MISC_HOST_CTRL_CHIPREV);
15781 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15782 tp->misc_host_ctrl);
15784 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15786 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15787 * we need to disable memory and use config. cycles
15788 * only to access all registers. The 5702/03 chips
15789 * can mistakenly decode the special cycles from the
15790 * ICH chipsets as memory write cycles, causing corruption
15791 * of register and memory space. Only certain ICH bridges
15792 * will drive special cycles with non-zero data during the
15793 * address phase which can fall within the 5703's address
15794 * range. This is not an ICH bug as the PCI spec allows
15795 * non-zero address during special cycles. However, only
15796 * these ICH bridges are known to drive non-zero addresses
15797 * during special cycles.
15799 * Since special cycles do not cross PCI bridges, we only
15800 * enable this workaround if the 5703 is on the secondary
15801 * bus of these ICH bridges.
15803 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15804 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15805 static struct tg3_dev_id {
15809 } ich_chipsets[] = {
15810 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15812 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15814 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15816 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15820 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15821 struct pci_dev *bridge = NULL;
15823 while (pci_id->vendor != 0) {
15824 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15830 if (pci_id->rev != PCI_ANY_ID) {
15831 if (bridge->revision > pci_id->rev)
15834 if (bridge->subordinate &&
15835 (bridge->subordinate->number ==
15836 tp->pdev->bus->number)) {
15837 tg3_flag_set(tp, ICH_WORKAROUND);
15838 pci_dev_put(bridge);
15844 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15845 static struct tg3_dev_id {
15848 } bridge_chipsets[] = {
15849 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15850 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15853 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15854 struct pci_dev *bridge = NULL;
15856 while (pci_id->vendor != 0) {
15857 bridge = pci_get_device(pci_id->vendor,
15864 if (bridge->subordinate &&
15865 (bridge->subordinate->number <=
15866 tp->pdev->bus->number) &&
15867 (bridge->subordinate->busn_res.end >=
15868 tp->pdev->bus->number)) {
15869 tg3_flag_set(tp, 5701_DMA_BUG);
15870 pci_dev_put(bridge);
15876 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15877 * DMA addresses > 40-bit. This bridge may have other additional
15878 * 57xx devices behind it in some 4-port NIC designs for example.
15879 * Any tg3 device found behind the bridge will also need the 40-bit
15882 if (tg3_flag(tp, 5780_CLASS)) {
15883 tg3_flag_set(tp, 40BIT_DMA_BUG);
15884 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15886 struct pci_dev *bridge = NULL;
15889 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15890 PCI_DEVICE_ID_SERVERWORKS_EPB,
15892 if (bridge && bridge->subordinate &&
15893 (bridge->subordinate->number <=
15894 tp->pdev->bus->number) &&
15895 (bridge->subordinate->busn_res.end >=
15896 tp->pdev->bus->number)) {
15897 tg3_flag_set(tp, 40BIT_DMA_BUG);
15898 pci_dev_put(bridge);
15904 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15905 tg3_asic_rev(tp) == ASIC_REV_5714)
15906 tp->pdev_peer = tg3_find_peer(tp);
15908 /* Determine TSO capabilities */
15909 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15910 ; /* Do nothing. HW bug. */
15911 else if (tg3_flag(tp, 57765_PLUS))
15912 tg3_flag_set(tp, HW_TSO_3);
15913 else if (tg3_flag(tp, 5755_PLUS) ||
15914 tg3_asic_rev(tp) == ASIC_REV_5906)
15915 tg3_flag_set(tp, HW_TSO_2);
15916 else if (tg3_flag(tp, 5750_PLUS)) {
15917 tg3_flag_set(tp, HW_TSO_1);
15918 tg3_flag_set(tp, TSO_BUG);
15919 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15920 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15921 tg3_flag_clear(tp, TSO_BUG);
15922 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15923 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15924 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15925 tg3_flag_set(tp, FW_TSO);
15926 tg3_flag_set(tp, TSO_BUG);
15927 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15928 tp->fw_needed = FIRMWARE_TG3TSO5;
15930 tp->fw_needed = FIRMWARE_TG3TSO;
15933 /* Selectively allow TSO based on operating conditions */
15934 if (tg3_flag(tp, HW_TSO_1) ||
15935 tg3_flag(tp, HW_TSO_2) ||
15936 tg3_flag(tp, HW_TSO_3) ||
15937 tg3_flag(tp, FW_TSO)) {
15938 /* For firmware TSO, assume ASF is disabled.
15939 * We'll disable TSO later if we discover ASF
15940 * is enabled in tg3_get_eeprom_hw_cfg().
15942 tg3_flag_set(tp, TSO_CAPABLE);
15944 tg3_flag_clear(tp, TSO_CAPABLE);
15945 tg3_flag_clear(tp, TSO_BUG);
15946 tp->fw_needed = NULL;
15949 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15950 tp->fw_needed = FIRMWARE_TG3;
15952 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15953 tp->fw_needed = FIRMWARE_TG357766;
15957 if (tg3_flag(tp, 5750_PLUS)) {
15958 tg3_flag_set(tp, SUPPORT_MSI);
15959 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15960 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15961 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15962 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15963 tp->pdev_peer == tp->pdev))
15964 tg3_flag_clear(tp, SUPPORT_MSI);
15966 if (tg3_flag(tp, 5755_PLUS) ||
15967 tg3_asic_rev(tp) == ASIC_REV_5906) {
15968 tg3_flag_set(tp, 1SHOT_MSI);
15971 if (tg3_flag(tp, 57765_PLUS)) {
15972 tg3_flag_set(tp, SUPPORT_MSIX);
15973 tp->irq_max = TG3_IRQ_MAX_VECS;
15979 if (tp->irq_max > 1) {
15980 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15981 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15983 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15984 tg3_asic_rev(tp) == ASIC_REV_5720)
15985 tp->txq_max = tp->irq_max - 1;
15988 if (tg3_flag(tp, 5755_PLUS) ||
15989 tg3_asic_rev(tp) == ASIC_REV_5906)
15990 tg3_flag_set(tp, SHORT_DMA_BUG);
15992 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15993 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15995 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15996 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15997 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15998 tg3_asic_rev(tp) == ASIC_REV_5762)
15999 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16001 if (tg3_flag(tp, 57765_PLUS) &&
16002 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16003 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16005 if (!tg3_flag(tp, 5705_PLUS) ||
16006 tg3_flag(tp, 5780_CLASS) ||
16007 tg3_flag(tp, USE_JUMBO_BDFLAG))
16008 tg3_flag_set(tp, JUMBO_CAPABLE);
16010 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16013 if (pci_is_pcie(tp->pdev)) {
16016 tg3_flag_set(tp, PCI_EXPRESS);
16018 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16019 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16020 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16021 tg3_flag_clear(tp, HW_TSO_2);
16022 tg3_flag_clear(tp, TSO_CAPABLE);
16024 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16025 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16026 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16027 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16028 tg3_flag_set(tp, CLKREQ_BUG);
16029 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16030 tg3_flag_set(tp, L1PLLPD_EN);
16032 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16033 /* BCM5785 devices are effectively PCIe devices, and should
16034 * follow PCIe codepaths, but do not have a PCIe capabilities
16037 tg3_flag_set(tp, PCI_EXPRESS);
16038 } else if (!tg3_flag(tp, 5705_PLUS) ||
16039 tg3_flag(tp, 5780_CLASS)) {
16040 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16041 if (!tp->pcix_cap) {
16042 dev_err(&tp->pdev->dev,
16043 "Cannot find PCI-X capability, aborting\n");
16047 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16048 tg3_flag_set(tp, PCIX_MODE);
16051 /* If we have an AMD 762 or VIA K8T800 chipset, write
16052 * reordering to the mailbox registers done by the host
16053 * controller can cause major troubles. We read back from
16054 * every mailbox register write to force the writes to be
16055 * posted to the chip in order.
16057 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16058 !tg3_flag(tp, PCI_EXPRESS))
16059 tg3_flag_set(tp, MBOX_WRITE_REORDER);
16061 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16062 &tp->pci_cacheline_sz);
16063 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16064 &tp->pci_lat_timer);
16065 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16066 tp->pci_lat_timer < 64) {
16067 tp->pci_lat_timer = 64;
16068 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16069 tp->pci_lat_timer);
16072 /* Important! -- It is critical that the PCI-X hw workaround
16073 * situation is decided before the first MMIO register access.
16075 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16076 /* 5700 BX chips need to have their TX producer index
16077 * mailboxes written twice to workaround a bug.
16079 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16081 /* If we are in PCI-X mode, enable register write workaround.
16083 * The workaround is to use indirect register accesses
16084 * for all chip writes not to mailbox registers.
16086 if (tg3_flag(tp, PCIX_MODE)) {
16089 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16091 /* The chip can have it's power management PCI config
16092 * space registers clobbered due to this bug.
16093 * So explicitly force the chip into D0 here.
16095 pci_read_config_dword(tp->pdev,
16096 tp->pm_cap + PCI_PM_CTRL,
16098 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16099 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16100 pci_write_config_dword(tp->pdev,
16101 tp->pm_cap + PCI_PM_CTRL,
16104 /* Also, force SERR#/PERR# in PCI command. */
16105 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16106 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16107 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16111 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16112 tg3_flag_set(tp, PCI_HIGH_SPEED);
16113 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16114 tg3_flag_set(tp, PCI_32BIT);
16116 /* Chip-specific fixup from Broadcom driver */
16117 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16118 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16119 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16120 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16123 /* Default fast path register access methods */
16124 tp->read32 = tg3_read32;
16125 tp->write32 = tg3_write32;
16126 tp->read32_mbox = tg3_read32;
16127 tp->write32_mbox = tg3_write32;
16128 tp->write32_tx_mbox = tg3_write32;
16129 tp->write32_rx_mbox = tg3_write32;
16131 /* Various workaround register access methods */
16132 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16133 tp->write32 = tg3_write_indirect_reg32;
16134 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16135 (tg3_flag(tp, PCI_EXPRESS) &&
16136 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16138 * Back to back register writes can cause problems on these
16139 * chips, the workaround is to read back all reg writes
16140 * except those to mailbox regs.
16142 * See tg3_write_indirect_reg32().
16144 tp->write32 = tg3_write_flush_reg32;
16147 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16148 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16149 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16150 tp->write32_rx_mbox = tg3_write_flush_reg32;
16153 if (tg3_flag(tp, ICH_WORKAROUND)) {
16154 tp->read32 = tg3_read_indirect_reg32;
16155 tp->write32 = tg3_write_indirect_reg32;
16156 tp->read32_mbox = tg3_read_indirect_mbox;
16157 tp->write32_mbox = tg3_write_indirect_mbox;
16158 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16159 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16164 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16165 pci_cmd &= ~PCI_COMMAND_MEMORY;
16166 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16168 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16169 tp->read32_mbox = tg3_read32_mbox_5906;
16170 tp->write32_mbox = tg3_write32_mbox_5906;
16171 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16172 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16175 if (tp->write32 == tg3_write_indirect_reg32 ||
16176 (tg3_flag(tp, PCIX_MODE) &&
16177 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16178 tg3_asic_rev(tp) == ASIC_REV_5701)))
16179 tg3_flag_set(tp, SRAM_USE_CONFIG);
16181 /* The memory arbiter has to be enabled in order for SRAM accesses
16182 * to succeed. Normally on powerup the tg3 chip firmware will make
16183 * sure it is enabled, but other entities such as system netboot
16184 * code might disable it.
16186 val = tr32(MEMARB_MODE);
16187 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16189 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16190 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16191 tg3_flag(tp, 5780_CLASS)) {
16192 if (tg3_flag(tp, PCIX_MODE)) {
16193 pci_read_config_dword(tp->pdev,
16194 tp->pcix_cap + PCI_X_STATUS,
16196 tp->pci_fn = val & 0x7;
16198 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16199 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16200 tg3_asic_rev(tp) == ASIC_REV_5720) {
16201 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16202 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16203 val = tr32(TG3_CPMU_STATUS);
16205 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16206 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16208 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16209 TG3_CPMU_STATUS_FSHFT_5719;
16212 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16213 tp->write32_tx_mbox = tg3_write_flush_reg32;
16214 tp->write32_rx_mbox = tg3_write_flush_reg32;
16217 /* Get eeprom hw config before calling tg3_set_power_state().
16218 * In particular, the TG3_FLAG_IS_NIC flag must be
16219 * determined before calling tg3_set_power_state() so that
16220 * we know whether or not to switch out of Vaux power.
16221 * When the flag is set, it means that GPIO1 is used for eeprom
16222 * write protect and also implies that it is a LOM where GPIOs
16223 * are not used to switch power.
16225 tg3_get_eeprom_hw_cfg(tp);
16227 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16228 tg3_flag_clear(tp, TSO_CAPABLE);
16229 tg3_flag_clear(tp, TSO_BUG);
16230 tp->fw_needed = NULL;
16233 if (tg3_flag(tp, ENABLE_APE)) {
16234 /* Allow reads and writes to the
16235 * APE register and memory space.
16237 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16238 PCISTATE_ALLOW_APE_SHMEM_WR |
16239 PCISTATE_ALLOW_APE_PSPACE_WR;
16240 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16243 tg3_ape_lock_init(tp);
16246 /* Set up tp->grc_local_ctrl before calling
16247 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16248 * will bring 5700's external PHY out of reset.
16249 * It is also used as eeprom write protect on LOMs.
16251 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16252 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16253 tg3_flag(tp, EEPROM_WRITE_PROT))
16254 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16255 GRC_LCLCTRL_GPIO_OUTPUT1);
16256 /* Unused GPIO3 must be driven as output on 5752 because there
16257 * are no pull-up resistors on unused GPIO pins.
16259 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16260 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16262 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16263 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16264 tg3_flag(tp, 57765_CLASS))
16265 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16267 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16268 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16269 /* Turn off the debug UART. */
16270 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16271 if (tg3_flag(tp, IS_NIC))
16272 /* Keep VMain power. */
16273 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16274 GRC_LCLCTRL_GPIO_OUTPUT0;
16277 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16278 tp->grc_local_ctrl |=
16279 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16281 /* Switch out of Vaux if it is a NIC */
16282 tg3_pwrsrc_switch_to_vmain(tp);
16284 /* Derive initial jumbo mode from MTU assigned in
16285 * ether_setup() via the alloc_etherdev() call
16287 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16288 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16290 /* Determine WakeOnLan speed to use. */
16291 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16292 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16293 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16294 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16295 tg3_flag_clear(tp, WOL_SPEED_100MB);
16297 tg3_flag_set(tp, WOL_SPEED_100MB);
16300 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16301 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16303 /* A few boards don't want Ethernet@WireSpeed phy feature */
16304 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16305 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16306 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16307 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16308 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16309 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16310 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16312 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16313 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16314 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16315 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16316 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16318 if (tg3_flag(tp, 5705_PLUS) &&
16319 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16320 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16321 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16322 !tg3_flag(tp, 57765_PLUS)) {
16323 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16324 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16325 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16326 tg3_asic_rev(tp) == ASIC_REV_5761) {
16327 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16328 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16329 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16330 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16331 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16333 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16336 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16337 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16338 tp->phy_otp = tg3_read_otp_phycfg(tp);
16339 if (tp->phy_otp == 0)
16340 tp->phy_otp = TG3_OTP_DEFAULT;
16343 if (tg3_flag(tp, CPMU_PRESENT))
16344 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16346 tp->mi_mode = MAC_MI_MODE_BASE;
16348 tp->coalesce_mode = 0;
16349 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16350 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16351 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16353 /* Set these bits to enable statistics workaround. */
16354 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16355 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16356 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16357 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16358 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16361 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16362 tg3_asic_rev(tp) == ASIC_REV_57780)
16363 tg3_flag_set(tp, USE_PHYLIB);
16365 err = tg3_mdio_init(tp);
16369 /* Initialize data/descriptor byte/word swapping. */
16370 val = tr32(GRC_MODE);
16371 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16372 tg3_asic_rev(tp) == ASIC_REV_5762)
16373 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16374 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16375 GRC_MODE_B2HRX_ENABLE |
16376 GRC_MODE_HTX2B_ENABLE |
16377 GRC_MODE_HOST_STACKUP);
16379 val &= GRC_MODE_HOST_STACKUP;
16381 tw32(GRC_MODE, val | tp->grc_mode);
16383 tg3_switch_clocks(tp);
16385 /* Clear this out for sanity. */
16386 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16388 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16390 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16391 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16392 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16393 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16394 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16395 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16396 void __iomem *sram_base;
16398 /* Write some dummy words into the SRAM status block
16399 * area, see if it reads back correctly. If the return
16400 * value is bad, force enable the PCIX workaround.
16402 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16404 writel(0x00000000, sram_base);
16405 writel(0x00000000, sram_base + 4);
16406 writel(0xffffffff, sram_base + 4);
16407 if (readl(sram_base) != 0x00000000)
16408 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16413 tg3_nvram_init(tp);
16415 /* If the device has an NVRAM, no need to load patch firmware */
16416 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16417 !tg3_flag(tp, NO_NVRAM))
16418 tp->fw_needed = NULL;
16420 grc_misc_cfg = tr32(GRC_MISC_CFG);
16421 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16423 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16424 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16425 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16426 tg3_flag_set(tp, IS_5788);
16428 if (!tg3_flag(tp, IS_5788) &&
16429 tg3_asic_rev(tp) != ASIC_REV_5700)
16430 tg3_flag_set(tp, TAGGED_STATUS);
16431 if (tg3_flag(tp, TAGGED_STATUS)) {
16432 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16433 HOSTCC_MODE_CLRTICK_TXBD);
16435 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16436 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16437 tp->misc_host_ctrl);
16440 /* Preserve the APE MAC_MODE bits */
16441 if (tg3_flag(tp, ENABLE_APE))
16442 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16446 if (tg3_10_100_only_device(tp, ent))
16447 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16449 err = tg3_phy_probe(tp);
16451 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16452 /* ... but do not return immediately ... */
16457 tg3_read_fw_ver(tp);
16459 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16460 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16462 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16463 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16465 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16468 /* 5700 {AX,BX} chips have a broken status block link
16469 * change bit implementation, so we must use the
16470 * status register in those cases.
16472 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16473 tg3_flag_set(tp, USE_LINKCHG_REG);
16475 tg3_flag_clear(tp, USE_LINKCHG_REG);
16477 /* The led_ctrl is set during tg3_phy_probe, here we might
16478 * have to force the link status polling mechanism based
16479 * upon subsystem IDs.
16481 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16482 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16483 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16484 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16485 tg3_flag_set(tp, USE_LINKCHG_REG);
16488 /* For all SERDES we poll the MAC status register. */
16489 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16490 tg3_flag_set(tp, POLL_SERDES);
16492 tg3_flag_clear(tp, POLL_SERDES);
16494 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16495 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16496 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16497 tg3_flag(tp, PCIX_MODE)) {
16498 tp->rx_offset = NET_SKB_PAD;
16499 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16500 tp->rx_copy_thresh = ~(u16)0;
16504 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16505 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16506 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16508 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16510 /* Increment the rx prod index on the rx std ring by at most
16511 * 8 for these chips to workaround hw errata.
16513 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16514 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16515 tg3_asic_rev(tp) == ASIC_REV_5755)
16516 tp->rx_std_max_post = 8;
16518 if (tg3_flag(tp, ASPM_WORKAROUND))
16519 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16520 PCIE_PWR_MGMT_L1_THRESH_MSK;
16525 #ifdef CONFIG_SPARC
16526 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16528 struct net_device *dev = tp->dev;
16529 struct pci_dev *pdev = tp->pdev;
16530 struct device_node *dp = pci_device_to_OF_node(pdev);
16531 const unsigned char *addr;
16534 addr = of_get_property(dp, "local-mac-address", &len);
16535 if (addr && len == 6) {
16536 memcpy(dev->dev_addr, addr, 6);
16542 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16544 struct net_device *dev = tp->dev;
16546 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16551 static int tg3_get_device_address(struct tg3 *tp)
16553 struct net_device *dev = tp->dev;
16554 u32 hi, lo, mac_offset;
16558 #ifdef CONFIG_SPARC
16559 if (!tg3_get_macaddr_sparc(tp))
16563 if (tg3_flag(tp, IS_SSB_CORE)) {
16564 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16565 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16570 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16571 tg3_flag(tp, 5780_CLASS)) {
16572 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16574 if (tg3_nvram_lock(tp))
16575 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16577 tg3_nvram_unlock(tp);
16578 } else if (tg3_flag(tp, 5717_PLUS)) {
16579 if (tp->pci_fn & 1)
16581 if (tp->pci_fn > 1)
16582 mac_offset += 0x18c;
16583 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16586 /* First try to get it from MAC address mailbox. */
16587 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16588 if ((hi >> 16) == 0x484b) {
16589 dev->dev_addr[0] = (hi >> 8) & 0xff;
16590 dev->dev_addr[1] = (hi >> 0) & 0xff;
16592 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16593 dev->dev_addr[2] = (lo >> 24) & 0xff;
16594 dev->dev_addr[3] = (lo >> 16) & 0xff;
16595 dev->dev_addr[4] = (lo >> 8) & 0xff;
16596 dev->dev_addr[5] = (lo >> 0) & 0xff;
16598 /* Some old bootcode may report a 0 MAC address in SRAM */
16599 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16602 /* Next, try NVRAM. */
16603 if (!tg3_flag(tp, NO_NVRAM) &&
16604 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16605 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16606 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16607 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16609 /* Finally just fetch it out of the MAC control regs. */
16611 hi = tr32(MAC_ADDR_0_HIGH);
16612 lo = tr32(MAC_ADDR_0_LOW);
16614 dev->dev_addr[5] = lo & 0xff;
16615 dev->dev_addr[4] = (lo >> 8) & 0xff;
16616 dev->dev_addr[3] = (lo >> 16) & 0xff;
16617 dev->dev_addr[2] = (lo >> 24) & 0xff;
16618 dev->dev_addr[1] = hi & 0xff;
16619 dev->dev_addr[0] = (hi >> 8) & 0xff;
16623 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16624 #ifdef CONFIG_SPARC
16625 if (!tg3_get_default_macaddr_sparc(tp))
16633 #define BOUNDARY_SINGLE_CACHELINE 1
16634 #define BOUNDARY_MULTI_CACHELINE 2
16636 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16638 int cacheline_size;
16642 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16644 cacheline_size = 1024;
16646 cacheline_size = (int) byte * 4;
16648 /* On 5703 and later chips, the boundary bits have no
16651 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16652 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16653 !tg3_flag(tp, PCI_EXPRESS))
16656 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16657 goal = BOUNDARY_MULTI_CACHELINE;
16659 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16660 goal = BOUNDARY_SINGLE_CACHELINE;
16666 if (tg3_flag(tp, 57765_PLUS)) {
16667 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16674 /* PCI controllers on most RISC systems tend to disconnect
16675 * when a device tries to burst across a cache-line boundary.
16676 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16678 * Unfortunately, for PCI-E there are only limited
16679 * write-side controls for this, and thus for reads
16680 * we will still get the disconnects. We'll also waste
16681 * these PCI cycles for both read and write for chips
16682 * other than 5700 and 5701 which do not implement the
16685 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16686 switch (cacheline_size) {
16691 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16692 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16693 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16695 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16696 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16701 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16702 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16706 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16707 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16710 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16711 switch (cacheline_size) {
16715 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16716 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16717 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16723 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16724 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16728 switch (cacheline_size) {
16730 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16731 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16732 DMA_RWCTRL_WRITE_BNDRY_16);
16737 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16738 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16739 DMA_RWCTRL_WRITE_BNDRY_32);
16744 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16745 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16746 DMA_RWCTRL_WRITE_BNDRY_64);
16751 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16752 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16753 DMA_RWCTRL_WRITE_BNDRY_128);
16758 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16759 DMA_RWCTRL_WRITE_BNDRY_256);
16762 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16763 DMA_RWCTRL_WRITE_BNDRY_512);
16767 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16768 DMA_RWCTRL_WRITE_BNDRY_1024);
16777 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16778 int size, bool to_device)
16780 struct tg3_internal_buffer_desc test_desc;
16781 u32 sram_dma_descs;
16784 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16786 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16787 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16788 tw32(RDMAC_STATUS, 0);
16789 tw32(WDMAC_STATUS, 0);
16791 tw32(BUFMGR_MODE, 0);
16792 tw32(FTQ_RESET, 0);
16794 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16795 test_desc.addr_lo = buf_dma & 0xffffffff;
16796 test_desc.nic_mbuf = 0x00002100;
16797 test_desc.len = size;
16800 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
16801 * the *second* time the tg3 driver was getting loaded after an
16804 * Broadcom tells me:
16805 * ...the DMA engine is connected to the GRC block and a DMA
16806 * reset may affect the GRC block in some unpredictable way...
16807 * The behavior of resets to individual blocks has not been tested.
16809 * Broadcom noted the GRC reset will also reset all sub-components.
16812 test_desc.cqid_sqid = (13 << 8) | 2;
16814 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16817 test_desc.cqid_sqid = (16 << 8) | 7;
16819 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16822 test_desc.flags = 0x00000005;
16824 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16827 val = *(((u32 *)&test_desc) + i);
16828 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16829 sram_dma_descs + (i * sizeof(u32)));
16830 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16832 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16835 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16837 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16840 for (i = 0; i < 40; i++) {
16844 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16846 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16847 if ((val & 0xffff) == sram_dma_descs) {
16858 #define TEST_BUFFER_SIZE 0x2000
16860 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16861 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16865 static int tg3_test_dma(struct tg3 *tp)
16867 dma_addr_t buf_dma;
16868 u32 *buf, saved_dma_rwctrl;
16871 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16872 &buf_dma, GFP_KERNEL);
16878 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16879 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16881 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16883 if (tg3_flag(tp, 57765_PLUS))
16886 if (tg3_flag(tp, PCI_EXPRESS)) {
16887 /* DMA read watermark not used on PCIE */
16888 tp->dma_rwctrl |= 0x00180000;
16889 } else if (!tg3_flag(tp, PCIX_MODE)) {
16890 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16891 tg3_asic_rev(tp) == ASIC_REV_5750)
16892 tp->dma_rwctrl |= 0x003f0000;
16894 tp->dma_rwctrl |= 0x003f000f;
16896 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16897 tg3_asic_rev(tp) == ASIC_REV_5704) {
16898 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16899 u32 read_water = 0x7;
16901 /* If the 5704 is behind the EPB bridge, we can
16902 * do the less restrictive ONE_DMA workaround for
16903 * better performance.
16905 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16906 tg3_asic_rev(tp) == ASIC_REV_5704)
16907 tp->dma_rwctrl |= 0x8000;
16908 else if (ccval == 0x6 || ccval == 0x7)
16909 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16911 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16913 /* Set bit 23 to enable PCIX hw bug fix */
16915 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16916 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16918 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16919 /* 5780 always in PCIX mode */
16920 tp->dma_rwctrl |= 0x00144000;
16921 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16922 /* 5714 always in PCIX mode */
16923 tp->dma_rwctrl |= 0x00148000;
16925 tp->dma_rwctrl |= 0x001b000f;
16928 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16929 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16931 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16932 tg3_asic_rev(tp) == ASIC_REV_5704)
16933 tp->dma_rwctrl &= 0xfffffff0;
16935 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16936 tg3_asic_rev(tp) == ASIC_REV_5701) {
16937 /* Remove this if it causes problems for some boards. */
16938 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16940 /* On 5700/5701 chips, we need to set this bit.
16941 * Otherwise the chip will issue cacheline transactions
16942 * to streamable DMA memory with not all the byte
16943 * enables turned on. This is an error on several
16944 * RISC PCI controllers, in particular sparc64.
16946 * On 5703/5704 chips, this bit has been reassigned
16947 * a different meaning. In particular, it is used
16948 * on those chips to enable a PCI-X workaround.
16950 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16953 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16956 /* Unneeded, already done by tg3_get_invariants. */
16957 tg3_switch_clocks(tp);
16960 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16961 tg3_asic_rev(tp) != ASIC_REV_5701)
16964 /* It is best to perform DMA test with maximum write burst size
16965 * to expose the 5700/5701 write DMA bug.
16967 saved_dma_rwctrl = tp->dma_rwctrl;
16968 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16969 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16974 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16977 /* Send the buffer to the chip. */
16978 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16980 dev_err(&tp->pdev->dev,
16981 "%s: Buffer write failed. err = %d\n",
16987 /* validate data reached card RAM correctly. */
16988 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16990 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16991 if (le32_to_cpu(val) != p[i]) {
16992 dev_err(&tp->pdev->dev,
16993 "%s: Buffer corrupted on device! "
16994 "(%d != %d)\n", __func__, val, i);
16995 /* ret = -ENODEV here? */
17000 /* Now read it back. */
17001 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17003 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17004 "err = %d\n", __func__, ret);
17009 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17013 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17014 DMA_RWCTRL_WRITE_BNDRY_16) {
17015 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17016 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17017 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17020 dev_err(&tp->pdev->dev,
17021 "%s: Buffer corrupted on read back! "
17022 "(%d != %d)\n", __func__, p[i], i);
17028 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17034 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17035 DMA_RWCTRL_WRITE_BNDRY_16) {
17036 /* DMA test passed without adjusting DMA boundary,
17037 * now look for chipsets that are known to expose the
17038 * DMA bug without failing the test.
17040 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17041 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17042 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17044 /* Safe to use the calculated DMA boundary. */
17045 tp->dma_rwctrl = saved_dma_rwctrl;
17048 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17052 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17057 static void tg3_init_bufmgr_config(struct tg3 *tp)
17059 if (tg3_flag(tp, 57765_PLUS)) {
17060 tp->bufmgr_config.mbuf_read_dma_low_water =
17061 DEFAULT_MB_RDMA_LOW_WATER_5705;
17062 tp->bufmgr_config.mbuf_mac_rx_low_water =
17063 DEFAULT_MB_MACRX_LOW_WATER_57765;
17064 tp->bufmgr_config.mbuf_high_water =
17065 DEFAULT_MB_HIGH_WATER_57765;
17067 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17068 DEFAULT_MB_RDMA_LOW_WATER_5705;
17069 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17070 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17071 tp->bufmgr_config.mbuf_high_water_jumbo =
17072 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17073 } else if (tg3_flag(tp, 5705_PLUS)) {
17074 tp->bufmgr_config.mbuf_read_dma_low_water =
17075 DEFAULT_MB_RDMA_LOW_WATER_5705;
17076 tp->bufmgr_config.mbuf_mac_rx_low_water =
17077 DEFAULT_MB_MACRX_LOW_WATER_5705;
17078 tp->bufmgr_config.mbuf_high_water =
17079 DEFAULT_MB_HIGH_WATER_5705;
17080 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17081 tp->bufmgr_config.mbuf_mac_rx_low_water =
17082 DEFAULT_MB_MACRX_LOW_WATER_5906;
17083 tp->bufmgr_config.mbuf_high_water =
17084 DEFAULT_MB_HIGH_WATER_5906;
17087 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17088 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17089 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17090 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17091 tp->bufmgr_config.mbuf_high_water_jumbo =
17092 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17094 tp->bufmgr_config.mbuf_read_dma_low_water =
17095 DEFAULT_MB_RDMA_LOW_WATER;
17096 tp->bufmgr_config.mbuf_mac_rx_low_water =
17097 DEFAULT_MB_MACRX_LOW_WATER;
17098 tp->bufmgr_config.mbuf_high_water =
17099 DEFAULT_MB_HIGH_WATER;
17101 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17102 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17103 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17104 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17105 tp->bufmgr_config.mbuf_high_water_jumbo =
17106 DEFAULT_MB_HIGH_WATER_JUMBO;
17109 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17110 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17113 static char *tg3_phy_string(struct tg3 *tp)
17115 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17116 case TG3_PHY_ID_BCM5400: return "5400";
17117 case TG3_PHY_ID_BCM5401: return "5401";
17118 case TG3_PHY_ID_BCM5411: return "5411";
17119 case TG3_PHY_ID_BCM5701: return "5701";
17120 case TG3_PHY_ID_BCM5703: return "5703";
17121 case TG3_PHY_ID_BCM5704: return "5704";
17122 case TG3_PHY_ID_BCM5705: return "5705";
17123 case TG3_PHY_ID_BCM5750: return "5750";
17124 case TG3_PHY_ID_BCM5752: return "5752";
17125 case TG3_PHY_ID_BCM5714: return "5714";
17126 case TG3_PHY_ID_BCM5780: return "5780";
17127 case TG3_PHY_ID_BCM5755: return "5755";
17128 case TG3_PHY_ID_BCM5787: return "5787";
17129 case TG3_PHY_ID_BCM5784: return "5784";
17130 case TG3_PHY_ID_BCM5756: return "5722/5756";
17131 case TG3_PHY_ID_BCM5906: return "5906";
17132 case TG3_PHY_ID_BCM5761: return "5761";
17133 case TG3_PHY_ID_BCM5718C: return "5718C";
17134 case TG3_PHY_ID_BCM5718S: return "5718S";
17135 case TG3_PHY_ID_BCM57765: return "57765";
17136 case TG3_PHY_ID_BCM5719C: return "5719C";
17137 case TG3_PHY_ID_BCM5720C: return "5720C";
17138 case TG3_PHY_ID_BCM5762: return "5762C";
17139 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17140 case 0: return "serdes";
17141 default: return "unknown";
17145 static char *tg3_bus_string(struct tg3 *tp, char *str)
17147 if (tg3_flag(tp, PCI_EXPRESS)) {
17148 strcpy(str, "PCI Express");
17150 } else if (tg3_flag(tp, PCIX_MODE)) {
17151 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17153 strcpy(str, "PCIX:");
17155 if ((clock_ctrl == 7) ||
17156 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17157 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17158 strcat(str, "133MHz");
17159 else if (clock_ctrl == 0)
17160 strcat(str, "33MHz");
17161 else if (clock_ctrl == 2)
17162 strcat(str, "50MHz");
17163 else if (clock_ctrl == 4)
17164 strcat(str, "66MHz");
17165 else if (clock_ctrl == 6)
17166 strcat(str, "100MHz");
17168 strcpy(str, "PCI:");
17169 if (tg3_flag(tp, PCI_HIGH_SPEED))
17170 strcat(str, "66MHz");
17172 strcat(str, "33MHz");
17174 if (tg3_flag(tp, PCI_32BIT))
17175 strcat(str, ":32-bit");
17177 strcat(str, ":64-bit");
17181 static void tg3_init_coal(struct tg3 *tp)
17183 struct ethtool_coalesce *ec = &tp->coal;
17185 memset(ec, 0, sizeof(*ec));
17186 ec->cmd = ETHTOOL_GCOALESCE;
17187 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17188 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17189 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17190 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17191 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17192 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17193 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17194 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17195 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17197 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17198 HOSTCC_MODE_CLRTICK_TXBD)) {
17199 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17200 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17201 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17202 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17205 if (tg3_flag(tp, 5705_PLUS)) {
17206 ec->rx_coalesce_usecs_irq = 0;
17207 ec->tx_coalesce_usecs_irq = 0;
17208 ec->stats_block_coalesce_usecs = 0;
17212 static int tg3_init_one(struct pci_dev *pdev,
17213 const struct pci_device_id *ent)
17215 struct net_device *dev;
17218 u32 sndmbx, rcvmbx, intmbx;
17220 u64 dma_mask, persist_dma_mask;
17221 netdev_features_t features = 0;
17223 printk_once(KERN_INFO "%s\n", version);
17225 err = pci_enable_device(pdev);
17227 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17231 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17233 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17234 goto err_out_disable_pdev;
17237 pci_set_master(pdev);
17239 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17242 goto err_out_free_res;
17245 SET_NETDEV_DEV(dev, &pdev->dev);
17247 tp = netdev_priv(dev);
17250 tp->pm_cap = pdev->pm_cap;
17251 tp->rx_mode = TG3_DEF_RX_MODE;
17252 tp->tx_mode = TG3_DEF_TX_MODE;
17256 tp->msg_enable = tg3_debug;
17258 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17260 if (pdev_is_ssb_gige_core(pdev)) {
17261 tg3_flag_set(tp, IS_SSB_CORE);
17262 if (ssb_gige_must_flush_posted_writes(pdev))
17263 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17264 if (ssb_gige_one_dma_at_once(pdev))
17265 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17266 if (ssb_gige_have_roboswitch(pdev))
17267 tg3_flag_set(tp, ROBOSWITCH);
17268 if (ssb_gige_is_rgmii(pdev))
17269 tg3_flag_set(tp, RGMII_MODE);
17272 /* The word/byte swap controls here control register access byte
17273 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17276 tp->misc_host_ctrl =
17277 MISC_HOST_CTRL_MASK_PCI_INT |
17278 MISC_HOST_CTRL_WORD_SWAP |
17279 MISC_HOST_CTRL_INDIR_ACCESS |
17280 MISC_HOST_CTRL_PCISTATE_RW;
17282 /* The NONFRM (non-frame) byte/word swap controls take effect
17283 * on descriptor entries, anything which isn't packet data.
17285 * The StrongARM chips on the board (one for tx, one for rx)
17286 * are running in big-endian mode.
17288 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17289 GRC_MODE_WSWAP_NONFRM_DATA);
17290 #ifdef __BIG_ENDIAN
17291 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17293 spin_lock_init(&tp->lock);
17294 spin_lock_init(&tp->indirect_lock);
17295 INIT_WORK(&tp->reset_task, tg3_reset_task);
17297 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17299 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17301 goto err_out_free_dev;
17304 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17305 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17306 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17307 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17308 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17309 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17310 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17311 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17312 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17313 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17314 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17315 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17316 tg3_flag_set(tp, ENABLE_APE);
17317 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17318 if (!tp->aperegs) {
17319 dev_err(&pdev->dev,
17320 "Cannot map APE registers, aborting\n");
17322 goto err_out_iounmap;
17326 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17327 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17329 dev->ethtool_ops = &tg3_ethtool_ops;
17330 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17331 dev->netdev_ops = &tg3_netdev_ops;
17332 dev->irq = pdev->irq;
17334 err = tg3_get_invariants(tp, ent);
17336 dev_err(&pdev->dev,
17337 "Problem fetching invariants of chip, aborting\n");
17338 goto err_out_apeunmap;
17341 /* The EPB bridge inside 5714, 5715, and 5780 and any
17342 * device behind the EPB cannot support DMA addresses > 40-bit.
17343 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17344 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17345 * do DMA address check in tg3_start_xmit().
17347 if (tg3_flag(tp, IS_5788))
17348 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17349 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17350 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17351 #ifdef CONFIG_HIGHMEM
17352 dma_mask = DMA_BIT_MASK(64);
17355 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17357 /* Configure DMA attributes. */
17358 if (dma_mask > DMA_BIT_MASK(32)) {
17359 err = pci_set_dma_mask(pdev, dma_mask);
17361 features |= NETIF_F_HIGHDMA;
17362 err = pci_set_consistent_dma_mask(pdev,
17365 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17366 "DMA for consistent allocations\n");
17367 goto err_out_apeunmap;
17371 if (err || dma_mask == DMA_BIT_MASK(32)) {
17372 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17374 dev_err(&pdev->dev,
17375 "No usable DMA configuration, aborting\n");
17376 goto err_out_apeunmap;
17380 tg3_init_bufmgr_config(tp);
17382 features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17384 /* 5700 B0 chips do not support checksumming correctly due
17385 * to hardware bugs.
17387 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17388 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17390 if (tg3_flag(tp, 5755_PLUS))
17391 features |= NETIF_F_IPV6_CSUM;
17394 /* TSO is on by default on chips that support hardware TSO.
17395 * Firmware TSO on older chips gives lower performance, so it
17396 * is off by default, but can be enabled using ethtool.
17398 if ((tg3_flag(tp, HW_TSO_1) ||
17399 tg3_flag(tp, HW_TSO_2) ||
17400 tg3_flag(tp, HW_TSO_3)) &&
17401 (features & NETIF_F_IP_CSUM))
17402 features |= NETIF_F_TSO;
17403 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17404 if (features & NETIF_F_IPV6_CSUM)
17405 features |= NETIF_F_TSO6;
17406 if (tg3_flag(tp, HW_TSO_3) ||
17407 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17408 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17409 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17410 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17411 tg3_asic_rev(tp) == ASIC_REV_57780)
17412 features |= NETIF_F_TSO_ECN;
17415 dev->features |= features;
17416 dev->vlan_features |= features;
17419 * Add loopback capability only for a subset of devices that support
17420 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17421 * loopback for the remaining devices.
17423 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17424 !tg3_flag(tp, CPMU_PRESENT))
17425 /* Add the loopback capability */
17426 features |= NETIF_F_LOOPBACK;
17428 dev->hw_features |= features;
17430 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17431 !tg3_flag(tp, TSO_CAPABLE) &&
17432 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17433 tg3_flag_set(tp, MAX_RXPEND_64);
17434 tp->rx_pending = 63;
17437 err = tg3_get_device_address(tp);
17439 dev_err(&pdev->dev,
17440 "Could not obtain valid ethernet address, aborting\n");
17441 goto err_out_apeunmap;
17445 * Reset chip in case UNDI or EFI driver did not shutdown
17446 * DMA self test will enable WDMAC and we'll see (spurious)
17447 * pending DMA on the PCI bus at that point.
17449 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17450 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17451 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17452 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17455 err = tg3_test_dma(tp);
17457 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17458 goto err_out_apeunmap;
17461 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17462 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17463 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17464 for (i = 0; i < tp->irq_max; i++) {
17465 struct tg3_napi *tnapi = &tp->napi[i];
17468 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17470 tnapi->int_mbox = intmbx;
17476 tnapi->consmbox = rcvmbx;
17477 tnapi->prodmbox = sndmbx;
17480 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17482 tnapi->coal_now = HOSTCC_MODE_NOW;
17484 if (!tg3_flag(tp, SUPPORT_MSIX))
17488 * If we support MSIX, we'll be using RSS. If we're using
17489 * RSS, the first vector only handles link interrupts and the
17490 * remaining vectors handle rx and tx interrupts. Reuse the
17491 * mailbox values for the next iteration. The values we setup
17492 * above are still useful for the single vectored mode.
17507 pci_set_drvdata(pdev, dev);
17509 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17510 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17511 tg3_asic_rev(tp) == ASIC_REV_5762)
17512 tg3_flag_set(tp, PTP_CAPABLE);
17514 if (tg3_flag(tp, 5717_PLUS)) {
17515 /* Resume a low-power mode */
17516 tg3_frob_aux_power(tp, false);
17519 tg3_timer_init(tp);
17521 tg3_carrier_off(tp);
17523 err = register_netdev(dev);
17525 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17526 goto err_out_apeunmap;
17529 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17530 tp->board_part_number,
17531 tg3_chip_rev_id(tp),
17532 tg3_bus_string(tp, str),
17535 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17536 struct phy_device *phydev;
17537 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17539 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17540 phydev->drv->name, dev_name(&phydev->dev));
17544 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17545 ethtype = "10/100Base-TX";
17546 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17547 ethtype = "1000Base-SX";
17549 ethtype = "10/100/1000Base-T";
17551 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17552 "(WireSpeed[%d], EEE[%d])\n",
17553 tg3_phy_string(tp), ethtype,
17554 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17555 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17558 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17559 (dev->features & NETIF_F_RXCSUM) != 0,
17560 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17561 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17562 tg3_flag(tp, ENABLE_ASF) != 0,
17563 tg3_flag(tp, TSO_CAPABLE) != 0);
17564 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17566 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17567 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17569 pci_save_state(pdev);
17575 iounmap(tp->aperegs);
17576 tp->aperegs = NULL;
17589 pci_release_regions(pdev);
17591 err_out_disable_pdev:
17592 pci_disable_device(pdev);
17593 pci_set_drvdata(pdev, NULL);
17597 static void tg3_remove_one(struct pci_dev *pdev)
17599 struct net_device *dev = pci_get_drvdata(pdev);
17602 struct tg3 *tp = netdev_priv(dev);
17604 release_firmware(tp->fw);
17606 tg3_reset_task_cancel(tp);
17608 if (tg3_flag(tp, USE_PHYLIB)) {
17613 unregister_netdev(dev);
17615 iounmap(tp->aperegs);
17616 tp->aperegs = NULL;
17623 pci_release_regions(pdev);
17624 pci_disable_device(pdev);
17625 pci_set_drvdata(pdev, NULL);
17629 #ifdef CONFIG_PM_SLEEP
17630 static int tg3_suspend(struct device *device)
17632 struct pci_dev *pdev = to_pci_dev(device);
17633 struct net_device *dev = pci_get_drvdata(pdev);
17634 struct tg3 *tp = netdev_priv(dev);
17637 if (!netif_running(dev))
17640 tg3_reset_task_cancel(tp);
17642 tg3_netif_stop(tp);
17644 tg3_timer_stop(tp);
17646 tg3_full_lock(tp, 1);
17647 tg3_disable_ints(tp);
17648 tg3_full_unlock(tp);
17650 netif_device_detach(dev);
17652 tg3_full_lock(tp, 0);
17653 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17654 tg3_flag_clear(tp, INIT_COMPLETE);
17655 tg3_full_unlock(tp);
17657 err = tg3_power_down_prepare(tp);
17661 tg3_full_lock(tp, 0);
17663 tg3_flag_set(tp, INIT_COMPLETE);
17664 err2 = tg3_restart_hw(tp, true);
17668 tg3_timer_start(tp);
17670 netif_device_attach(dev);
17671 tg3_netif_start(tp);
17674 tg3_full_unlock(tp);
17683 static int tg3_resume(struct device *device)
17685 struct pci_dev *pdev = to_pci_dev(device);
17686 struct net_device *dev = pci_get_drvdata(pdev);
17687 struct tg3 *tp = netdev_priv(dev);
17690 if (!netif_running(dev))
17693 netif_device_attach(dev);
17695 tg3_full_lock(tp, 0);
17697 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17699 tg3_flag_set(tp, INIT_COMPLETE);
17700 err = tg3_restart_hw(tp,
17701 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17705 tg3_timer_start(tp);
17707 tg3_netif_start(tp);
17710 tg3_full_unlock(tp);
17717 #endif /* CONFIG_PM_SLEEP */
17719 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17722 * tg3_io_error_detected - called when PCI error is detected
17723 * @pdev: Pointer to PCI device
17724 * @state: The current pci connection state
17726 * This function is called after a PCI bus error affecting
17727 * this device has been detected.
17729 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17730 pci_channel_state_t state)
17732 struct net_device *netdev = pci_get_drvdata(pdev);
17733 struct tg3 *tp = netdev_priv(netdev);
17734 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17736 netdev_info(netdev, "PCI I/O error detected\n");
17740 if (!netif_running(netdev))
17745 tg3_netif_stop(tp);
17747 tg3_timer_stop(tp);
17749 /* Want to make sure that the reset task doesn't run */
17750 tg3_reset_task_cancel(tp);
17752 netif_device_detach(netdev);
17754 /* Clean up software state, even if MMIO is blocked */
17755 tg3_full_lock(tp, 0);
17756 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17757 tg3_full_unlock(tp);
17760 if (state == pci_channel_io_perm_failure) {
17761 tg3_napi_enable(tp);
17763 err = PCI_ERS_RESULT_DISCONNECT;
17765 pci_disable_device(pdev);
17774 * tg3_io_slot_reset - called after the pci bus has been reset.
17775 * @pdev: Pointer to PCI device
17777 * Restart the card from scratch, as if from a cold-boot.
17778 * At this point, the card has exprienced a hard reset,
17779 * followed by fixups by BIOS, and has its config space
17780 * set up identically to what it was at cold boot.
17782 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17784 struct net_device *netdev = pci_get_drvdata(pdev);
17785 struct tg3 *tp = netdev_priv(netdev);
17786 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17791 if (pci_enable_device(pdev)) {
17792 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17796 pci_set_master(pdev);
17797 pci_restore_state(pdev);
17798 pci_save_state(pdev);
17800 if (!netif_running(netdev)) {
17801 rc = PCI_ERS_RESULT_RECOVERED;
17805 err = tg3_power_up(tp);
17809 rc = PCI_ERS_RESULT_RECOVERED;
17812 if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) {
17813 tg3_napi_enable(tp);
17822 * tg3_io_resume - called when traffic can start flowing again.
17823 * @pdev: Pointer to PCI device
17825 * This callback is called when the error recovery driver tells
17826 * us that its OK to resume normal operation.
17828 static void tg3_io_resume(struct pci_dev *pdev)
17830 struct net_device *netdev = pci_get_drvdata(pdev);
17831 struct tg3 *tp = netdev_priv(netdev);
17836 if (!netif_running(netdev))
17839 tg3_full_lock(tp, 0);
17840 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17841 tg3_flag_set(tp, INIT_COMPLETE);
17842 err = tg3_restart_hw(tp, true);
17844 tg3_full_unlock(tp);
17845 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17849 netif_device_attach(netdev);
17851 tg3_timer_start(tp);
17853 tg3_netif_start(tp);
17855 tg3_full_unlock(tp);
17863 static const struct pci_error_handlers tg3_err_handler = {
17864 .error_detected = tg3_io_error_detected,
17865 .slot_reset = tg3_io_slot_reset,
17866 .resume = tg3_io_resume
17869 static struct pci_driver tg3_driver = {
17870 .name = DRV_MODULE_NAME,
17871 .id_table = tg3_pci_tbl,
17872 .probe = tg3_init_one,
17873 .remove = tg3_remove_one,
17874 .err_handler = &tg3_err_handler,
17875 .driver.pm = &tg3_pm_ops,
17878 module_pci_driver(tg3_driver);