2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2012 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
48 #include <net/checksum.h>
51 #include <asm/system.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
57 #include <asm/idprom.h>
66 /* Functions & macros to verify TG3_FLAGS types */
68 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
70 return test_bit(flag, bits);
73 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
78 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
80 clear_bit(flag, bits);
/* Convenience wrappers: token-paste the short flag name onto TG3_FLAG_ and
 * operate on tp->tg3_flags via the _tg3_flag*() helpers above.
 * NOTE(review): lines below carry stray leading line-number artifacts from a
 * lossy extraction of this file — strip them before building.
 */
83 #define tg3_flag(tp, flag) \
84 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag) \
86 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag) \
88 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define DRV_MODULE_NAME "tg3"
92 #define TG3_MIN_NUM 122
93 #define DRV_MODULE_VERSION \
94 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
95 #define DRV_MODULE_RELDATE "December 7, 2011"
97 #define RESET_KIND_SHUTDOWN 0
98 #define RESET_KIND_INIT 1
99 #define RESET_KIND_SUSPEND 2
101 #define TG3_DEF_RX_MODE 0
102 #define TG3_DEF_TX_MODE 0
103 #define TG3_DEF_MSG_ENABLE \
113 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
115 /* length of time before we decide the hardware is borked,
116 * and dev->tx_timeout() should be called to fix the problem
119 #define TG3_TX_TIMEOUT (5 * HZ)
121 /* hardware minimum and maximum for a single frame's data payload */
122 #define TG3_MIN_MTU 60
123 #define TG3_MAX_MTU(tp) \
124 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
126 /* These numbers seem to be hard coded in the NIC firmware somehow.
127 * You can't change the ring sizes, but you can change where you place
128 * them in the NIC onboard memory.
130 #define TG3_RX_STD_RING_SIZE(tp) \
131 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
133 #define TG3_DEF_RX_RING_PENDING 200
134 #define TG3_RX_JMB_RING_SIZE(tp) \
135 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
137 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
139 /* Do not place this n-ring entries value into the tp struct itself,
140 * we really want to expose these constants to GCC so that modulo et
141 * al. operations are done with shifts and masks instead of with
142 * hw multiply/modulo instructions. Another solution would be to
143 * replace things like '% foo' with '& (foo - 1)'.
146 #define TG3_TX_RING_SIZE 512
147 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
149 #define TG3_RX_STD_RING_BYTES(tp) \
150 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
151 #define TG3_RX_JMB_RING_BYTES(tp) \
152 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
153 #define TG3_RX_RCB_RING_BYTES(tp) \
154 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
155 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
157 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
159 #define TG3_DMA_BYTE_ENAB 64
161 #define TG3_RX_STD_DMA_SZ 1536
162 #define TG3_RX_JMB_DMA_SZ 9046
164 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
166 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
167 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
169 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
170 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
172 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
173 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
175 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
176 * that are at least dword aligned when used in PCIX mode. The driver
177 * works around this bug by double copying the packet. This workaround
178 * is built into the normal double copy length check for efficiency.
180 * However, the double copy is only necessary on those architectures
181 * where unaligned memory accesses are inefficient. For those architectures
182 * where unaligned memory accesses incur little penalty, we can reintegrate
183 * the 5701 in the normal rx path. Doing so saves a device structure
184 * dereference by hardcoding the double copy threshold in place.
186 #define TG3_RX_COPY_THRESHOLD 256
187 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
188 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
190 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
193 #if (NET_IP_ALIGN != 0)
194 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
196 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
199 /* minimum number of free TX descriptors required to wake up TX process */
200 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
201 #define TG3_TX_BD_DMA_MAX_2K 2048
202 #define TG3_TX_BD_DMA_MAX_4K 4096
204 #define TG3_RAW_IP_ALIGN 2
206 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
207 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
209 #define FIRMWARE_TG3 "tigon/tg3.bin"
210 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
211 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
/* Module identity, firmware blob declarations, and the tg3_debug parameter
 * (bitmapped netif message-level mask; -1 selects TG3_DEF_MSG_ENABLE).
 */
213 static char version[] __devinitdata =
214 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
216 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
217 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
218 MODULE_LICENSE("GPL");
219 MODULE_VERSION(DRV_MODULE_VERSION);
220 MODULE_FIRMWARE(FIRMWARE_TG3);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
222 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
224 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
225 module_param(tg3_debug, int, 0);
226 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
228 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
302 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
303 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
304 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
305 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
306 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
307 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
308 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
309 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
313 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
315 static const struct {
316 const char string[ETH_GSTRING_LEN];
317 } ethtool_stats_keys[] = {
320 { "rx_ucast_packets" },
321 { "rx_mcast_packets" },
322 { "rx_bcast_packets" },
324 { "rx_align_errors" },
325 { "rx_xon_pause_rcvd" },
326 { "rx_xoff_pause_rcvd" },
327 { "rx_mac_ctrl_rcvd" },
328 { "rx_xoff_entered" },
329 { "rx_frame_too_long_errors" },
331 { "rx_undersize_packets" },
332 { "rx_in_length_errors" },
333 { "rx_out_length_errors" },
334 { "rx_64_or_less_octet_packets" },
335 { "rx_65_to_127_octet_packets" },
336 { "rx_128_to_255_octet_packets" },
337 { "rx_256_to_511_octet_packets" },
338 { "rx_512_to_1023_octet_packets" },
339 { "rx_1024_to_1522_octet_packets" },
340 { "rx_1523_to_2047_octet_packets" },
341 { "rx_2048_to_4095_octet_packets" },
342 { "rx_4096_to_8191_octet_packets" },
343 { "rx_8192_to_9022_octet_packets" },
350 { "tx_flow_control" },
352 { "tx_single_collisions" },
353 { "tx_mult_collisions" },
355 { "tx_excessive_collisions" },
356 { "tx_late_collisions" },
357 { "tx_collide_2times" },
358 { "tx_collide_3times" },
359 { "tx_collide_4times" },
360 { "tx_collide_5times" },
361 { "tx_collide_6times" },
362 { "tx_collide_7times" },
363 { "tx_collide_8times" },
364 { "tx_collide_9times" },
365 { "tx_collide_10times" },
366 { "tx_collide_11times" },
367 { "tx_collide_12times" },
368 { "tx_collide_13times" },
369 { "tx_collide_14times" },
370 { "tx_collide_15times" },
371 { "tx_ucast_packets" },
372 { "tx_mcast_packets" },
373 { "tx_bcast_packets" },
374 { "tx_carrier_sense_errors" },
378 { "dma_writeq_full" },
379 { "dma_write_prioq_full" },
383 { "rx_threshold_hit" },
385 { "dma_readq_full" },
386 { "dma_read_prioq_full" },
387 { "tx_comp_queue_full" },
389 { "ring_set_send_prod_index" },
390 { "ring_status_update" },
392 { "nic_avoided_irqs" },
393 { "nic_tx_threshold_hit" },
395 { "mbuf_lwm_thresh_hit" },
398 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
401 static const struct {
402 const char string[ETH_GSTRING_LEN];
403 } ethtool_test_keys[] = {
404 { "nvram test (online) " },
405 { "link test (online) " },
406 { "register test (offline)" },
407 { "memory test (offline)" },
408 { "mac loopback test (offline)" },
409 { "phy loopback test (offline)" },
410 { "ext loopback test (offline)" },
411 { "interrupt test (offline)" },
414 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
417 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
419 writel(val, tp->regs + off);
422 static u32 tg3_read32(struct tg3 *tp, u32 off)
424 return readl(tp->regs + off);
427 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
429 writel(val, tp->aperegs + off);
432 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
434 return readl(tp->aperegs + off);
437 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
441 spin_lock_irqsave(&tp->indirect_lock, flags);
442 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
443 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
444 spin_unlock_irqrestore(&tp->indirect_lock, flags);
447 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
449 writel(val, tp->regs + off);
450 readl(tp->regs + off);
453 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
458 spin_lock_irqsave(&tp->indirect_lock, flags);
459 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
460 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
461 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Write a mailbox register when direct MMIO is unusable: the two hot
 * producer-index mailboxes have dedicated config-space aliases; everything
 * else goes through the generic config window at offset + 0x5600.
 * NOTE(review): this block is visibly truncated — braces, a "flags"
 * declaration, the early "return"s after the two special-case writes, and
 * the second half of the final condition are missing.  Do not rewrite from
 * this copy; restore from upstream tg3.c.
 */
465 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
469 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
470 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
471 TG3_64BIT_REG_LOW, val);
474 if (off == TG3_RX_STD_PROD_IDX_REG) {
475 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
476 TG3_64BIT_REG_LOW, val);
/* Generic path: mailboxes sit 0x5600 above the register offset. */
480 spin_lock_irqsave(&tp->indirect_lock, flags);
481 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
482 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
483 spin_unlock_irqrestore(&tp->indirect_lock, flags);
485 /* In indirect mode when disabling interrupts, we also need
486 * to clear the interrupt bit in the GRC local ctrl register.
488 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
490 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
491 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
495 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
500 spin_lock_irqsave(&tp->indirect_lock, flags);
501 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
502 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
503 spin_unlock_irqrestore(&tp->indirect_lock, flags);
507 /* usec_wait specifies the wait time in usec when writing to certain registers
508 * where it is unsafe to read back the register without some delay.
509 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
510 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
/* NOTE(review): truncated — braces, the else branch selecting between
 * write methods, the udelay(usec_wait) calls, and the read-back flush are
 * missing from this copy; restore from upstream tg3.c before rewriting.
 */
512 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
514 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
515 /* Non-posted methods */
516 tp->write32(tp, off, val);
519 tg3_write32(tp, off, val);
524 /* Wait again after the read for the posted method to guarantee that
525 * the wait time is met.
531 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
533 tp->write32_mbox(tp, off, val);
534 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
535 tp->read32_mbox(tp, off);
/* TX mailbox write with chip-bug workarounds gated on TXD_MBOX_HWBUG and
 * MBOX_WRITE_REORDER flags.
 * NOTE(review): truncated — the writel()/readl() statements guarded by the
 * two conditions are missing from this copy; restore from upstream tg3.c.
 */
538 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
540 void __iomem *mbox = tp->regs + off;
542 if (tg3_flag(tp, TXD_MBOX_HWBUG))
544 if (tg3_flag(tp, MBOX_WRITE_REORDER))
548 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
550 return readl(tp->regs + off + GRCMBOX_BASE);
553 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
555 writel(val, tp->regs + off + GRCMBOX_BASE);
/* Register/mailbox access shorthands.  All of these expand an unhygienic
 * reference to a local variable named "tp" — callers must have a
 * "struct tg3 *tp" in scope.  tw32_f flushes immediately; tw32_wait_f
 * additionally waits "us" microseconds (see _tw32_flush).
 */
558 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
559 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
560 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
561 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
562 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
564 #define tw32(reg, val) tp->write32(tp, reg, val)
565 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
566 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
567 #define tr32(reg) tp->read32(tp, reg)
/* Write a word into NIC on-board SRAM through the memory window, choosing
 * config-space or MMIO access based on the SRAM_USE_CONFIG flag; the
 * window base is always reset to 0 afterwards.
 * NOTE(review): truncated — braces, the "flags" declaration, the early
 * return for the 5906 stats-block hole, and the if/else structure markers
 * are missing from this copy; restore from upstream tg3.c.
 */
569 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
/* 5906 cannot access the stats block region; upstream returns early here. */
573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
574 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
577 spin_lock_irqsave(&tp->indirect_lock, flags);
578 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
579 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
580 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
582 /* Always leave this as zero. */
583 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
585 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
586 tw32_f(TG3PCI_MEM_WIN_DATA, val);
588 /* Always leave this as zero. */
589 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
591 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Read a word from NIC on-board SRAM through the memory window — mirror of
 * tg3_write_mem(); result is stored through @val.
 * NOTE(review): truncated — braces, the "flags" declaration, the 5906
 * early-return body (upstream stores 0 and returns), and the if/else
 * markers are missing from this copy; restore from upstream tg3.c.
 */
594 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
598 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
599 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
604 spin_lock_irqsave(&tp->indirect_lock, flags);
605 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
606 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
607 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
609 /* Always leave this as zero. */
610 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
612 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
613 *val = tr32(TG3PCI_MEM_WIN_DATA);
615 /* Always leave this as zero. */
616 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
618 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Release any APE hardware locks this driver instance might still hold
 * from a previous run.  5761 uses the legacy GRANT register block; newer
 * chips use the per-function block, where the grant bit is 1 << pci_fn for
 * non-PHY locks.
 * NOTE(review): truncated — variable declarations, the switch statement
 * head, break statements, and loop/function closing braces are missing
 * from this copy; restore from upstream tg3.c before rewriting.
 */
621 static void tg3_ape_lock_init(struct tg3 *tp)
626 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
627 regbase = TG3_APE_LOCK_GRANT;
629 regbase = TG3_APE_PER_LOCK_GRANT;
631 /* Make sure the driver hasn't any stale locks. */
632 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
634 case TG3_APE_LOCK_PHY0:
635 case TG3_APE_LOCK_PHY1:
636 case TG3_APE_LOCK_PHY2:
637 case TG3_APE_LOCK_PHY3:
638 bit = APE_LOCK_GRANT_DRIVER;
642 bit = APE_LOCK_GRANT_DRIVER;
644 bit = 1 << tp->pci_fn;
646 tg3_ape_write32(tp, regbase + 4 * i, bit);
/* Acquire an APE hardware lock: post the request bit, then poll the grant
 * register (up to ~1 ms) until our bit appears; on timeout the request is
 * revoked and an error is returned.
 * NOTE(review): truncated — the switch head, early returns, delay calls in
 * the poll loop, the grant check/break, the success/timeout return values,
 * and closing braces are missing from this copy; restore from upstream.
 */
651 static int tg3_ape_lock(struct tg3 *tp, int locknum)
655 u32 status, req, gnt, bit;
/* Locks are a no-op unless the APE is present/enabled. */
657 if (!tg3_flag(tp, ENABLE_APE))
661 case TG3_APE_LOCK_GPIO:
662 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
664 case TG3_APE_LOCK_GRC:
665 case TG3_APE_LOCK_MEM:
667 bit = APE_LOCK_REQ_DRIVER;
669 bit = 1 << tp->pci_fn;
675 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
676 req = TG3_APE_LOCK_REQ;
677 gnt = TG3_APE_LOCK_GRANT;
679 req = TG3_APE_PER_LOCK_REQ;
680 gnt = TG3_APE_PER_LOCK_GRANT;
685 tg3_ape_write32(tp, req + off, bit);
687 /* Wait for up to 1 millisecond to acquire lock. */
688 for (i = 0; i < 100; i++) {
689 status = tg3_ape_read32(tp, gnt + off);
696 /* Revoke the lock request. */
697 tg3_ape_write32(tp, gnt + off, bit);
/* Release an APE hardware lock by writing our grant bit back to the grant
 * register (legacy block on 5761, per-function block otherwise).
 * NOTE(review): truncated — the switch head, early returns, "off"
 * computation, and closing braces are missing from this copy; restore from
 * upstream tg3.c.
 */
704 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
708 if (!tg3_flag(tp, ENABLE_APE))
712 case TG3_APE_LOCK_GPIO:
713 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
715 case TG3_APE_LOCK_GRC:
716 case TG3_APE_LOCK_MEM:
718 bit = APE_LOCK_GRANT_DRIVER;
720 bit = 1 << tp->pci_fn;
726 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
727 gnt = TG3_APE_LOCK_GRANT;
729 gnt = TG3_APE_PER_LOCK_GRANT;
731 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
/* Post an event to the APE firmware: verify the APE segment signature and
 * firmware-ready status, wait (under the MEM lock) for any previous event
 * to drain, write the new event with the PENDING bit, then ring
 * TG3_APE_EVENT.  NCSI firmware does not take APE events at all.
 * NOTE(review): truncated — declarations, early returns, the delay inside
 * the retry loop, break statements, and closing braces are missing from
 * this copy; restore from upstream tg3.c.
 */
734 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
739 /* NCSI does not support APE events */
740 if (tg3_flag(tp, APE_HAS_NCSI))
743 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
744 if (apedata != APE_SEG_SIG_MAGIC)
747 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
748 if (!(apedata & APE_FW_STATUS_READY))
751 /* Wait for up to 1 millisecond for APE to service previous event. */
752 for (i = 0; i < 10; i++) {
753 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
756 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
758 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
759 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
760 event | APE_EVENT_STATUS_EVENT_PENDING)
762 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
764 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
770 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
771 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1)
/* Tell the APE firmware about a driver lifecycle transition (kind is one
 * of the RESET_KIND_* values): INIT publishes the host segment signature,
 * bumps the init count and records driver id/behavior/state; SHUTDOWN
 * wipes the host signature (so the APE assumes OS-absent) and records WOL
 * vs. unload state; SUSPEND only selects the suspend event.  The chosen
 * event is then sent via tg3_ape_send_event().
 * NOTE(review): truncated — the switch head, break statements, the default
 * (early-return) case, else markers, and closing braces are missing from
 * this copy; restore from upstream tg3.c before rewriting.
 */
774 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
779 if (!tg3_flag(tp, ENABLE_APE))
783 case RESET_KIND_INIT:
784 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
785 APE_HOST_SEG_SIG_MAGIC);
786 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
787 APE_HOST_SEG_LEN_MAGIC);
788 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
789 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
790 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
791 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
792 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
793 APE_HOST_BEHAV_NO_PHYLOCK);
794 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
795 TG3_APE_HOST_DRVR_STATE_START);
797 event = APE_EVENT_STATUS_STATE_START;
799 case RESET_KIND_SHUTDOWN:
800 /* With the interface we are currently using,
801 * APE does not track driver state. Wiping
802 * out the HOST SEGMENT SIGNATURE forces
803 * the APE to assume OS absent status.
805 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
807 if (device_may_wakeup(&tp->pdev->dev) &&
808 tg3_flag(tp, WOL_ENABLE)) {
809 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
810 TG3_APE_HOST_WOL_SPEED_AUTO);
811 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
813 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
815 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
817 event = APE_EVENT_STATUS_STATE_UNLOAD;
819 case RESET_KIND_SUSPEND:
820 event = APE_EVENT_STATUS_STATE_SUSPEND;
826 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
828 tg3_ape_send_event(tp, event);
831 static void tg3_disable_ints(struct tg3 *tp)
835 tw32(TG3PCI_MISC_HOST_CTRL,
836 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
837 for (i = 0; i < tp->irq_max; i++)
838 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
/* Unmask the PCI interrupt, re-arm every NAPI vector's interrupt mailbox
 * with its last status tag, accumulate the per-vector coalescing bits, and
 * kick HOSTCC.  The repeated tw32_mailbox_f() under 1SHOT_MSI is a
 * deliberate double-write required by that mode, not a copy/paste error.
 * NOTE(review): truncated — the "int i;" declaration, loop/function
 * braces, and possibly other lines are missing from this copy; restore
 * from upstream tg3.c.
 */
841 static void tg3_enable_ints(struct tg3 *tp)
848 tw32(TG3PCI_MISC_HOST_CTRL,
849 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
851 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
852 for (i = 0; i < tp->irq_cnt; i++) {
853 struct tg3_napi *tnapi = &tp->napi[i];
855 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
856 if (tg3_flag(tp, 1SHOT_MSI))
857 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
859 tp->coal_now |= tnapi->coal_now;
862 /* Force an initial interrupt */
863 if (!tg3_flag(tp, TAGGED_STATUS) &&
864 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
865 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
867 tw32(HOSTCC_MODE, tp->coal_now);
869 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
/* Return non-zero when this NAPI vector has pending work: a PHY link
 * change (only when link changes are interrupt-driven, i.e. neither
 * USE_LINKCHG_REG nor POLL_SERDES) or unconsumed TX completions / RX
 * return-ring entries.
 * NOTE(review): truncated — the "work_exists = 1;" assignments under each
 * condition and the final "return work_exists;" are missing from this
 * copy; restore from upstream tg3.c.
 */
872 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
874 struct tg3 *tp = tnapi->tp;
875 struct tg3_hw_status *sblk = tnapi->hw_status;
876 unsigned int work_exists = 0;
878 /* check for phy events */
879 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
880 if (sblk->status & SD_STATUS_LINK_CHG)
883 /* check for RX/TX work to do */
884 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
885 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
/* NOTE(review): the opening of this comment and the function braces were
 * lost in truncation; restore from upstream tg3.c.
 */
892 * similar to tg3_enable_ints, but it accurately determines whether there
893 * is new work pending and can return without flushing the PIO write
894 * which reenables interrupts
896 static void tg3_int_reenable(struct tg3_napi *tnapi)
898 struct tg3 *tp = tnapi->tp;
/* Re-arm this vector's mailbox with the last processed status tag. */
900 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
903 /* When doing tagged status, this work check is unnecessary.
904 * The last_tag we write above tells the chip which piece of
905 * work we've completed.
907 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
908 tw32(HOSTCC_MODE, tp->coalesce_mode |
909 HOSTCC_MODE_ENABLE | tnapi->coal_now);
/* Switch the chip's core clock source: no-op on CPMU-equipped and
 * 5780-class chips; otherwise force the 625 MHz core clock, or step
 * through ALTCLK when leaving the 44 MHz core clock, with the mandated
 * 40 usec settle time on each write (tw32_wait_f).
 * NOTE(review): truncated — variable declarations, an early return, part
 * of the clock_ctrl mask expression, and braces are missing from this
 * copy; restore from upstream tg3.c before rewriting.
 */
912 static void tg3_switch_clocks(struct tg3 *tp)
917 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
920 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
922 orig_clock_ctrl = clock_ctrl;
923 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
924 CLOCK_CTRL_CLKRUN_OENABLE |
926 tp->pci_clock_ctrl = clock_ctrl;
928 if (tg3_flag(tp, 5705_PLUS)) {
929 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
930 tw32_wait_f(TG3PCI_CLOCK_CTRL,
931 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
933 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
934 tw32_wait_f(TG3PCI_CLOCK_CTRL,
936 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
938 tw32_wait_f(TG3PCI_CLOCK_CTRL,
939 clock_ctrl | (CLOCK_CTRL_ALTCLK),
942 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
/* Upper bound on MI_COM busy-poll iterations for PHY register access. */
945 #define PHY_BUSY_LOOPS 5000
/* Read PHY register @reg over the MDIO interface: temporarily disable MAC
 * auto-polling, issue a READ command through MAC_MI_COM, busy-wait for
 * MI_COM_BUSY to clear, extract the data field, then restore auto-poll.
 * NOTE(review): truncated — variable declarations, udelay() calls, the
 * loop decrement/termination, the ret/-EBUSY handling, and braces are
 * missing from this copy; restore from upstream tg3.c before rewriting.
 */
947 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
953 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
955 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
961 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
962 MI_COM_PHY_ADDR_MASK);
963 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
964 MI_COM_REG_ADDR_MASK);
965 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
967 tw32_f(MAC_MI_COM, frame_val);
969 loops = PHY_BUSY_LOOPS;
972 frame_val = tr32(MAC_MI_COM);
974 if ((frame_val & MI_COM_BUSY) == 0) {
976 frame_val = tr32(MAC_MI_COM);
984 *val = frame_val & MI_COM_DATA_MASK;
988 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
989 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Write @val to PHY register @reg over MDIO — mirror of tg3_readphy().
 * FET-style PHYs reject writes to MII_CTRL1000 / MII_TG3_AUX_CTRL, so
 * those are skipped up front.
 * NOTE(review): truncated — variable declarations, the early return for
 * the FET case, udelay() calls, loop bookkeeping, the ret handling, and
 * braces are missing from this copy; restore from upstream tg3.c.
 */
996 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1002 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1003 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1006 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1008 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1012 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1013 MI_COM_PHY_ADDR_MASK);
1014 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1015 MI_COM_REG_ADDR_MASK);
1016 frame_val |= (val & MI_COM_DATA_MASK);
1017 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1019 tw32_f(MAC_MI_COM, frame_val);
1021 loops = PHY_BUSY_LOOPS;
1022 while (loops != 0) {
1024 frame_val = tr32(MAC_MI_COM);
1025 if ((frame_val & MI_COM_BUSY) == 0) {
1027 frame_val = tr32(MAC_MI_COM);
1037 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1038 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Clause-45 style write via the clause-22 MMD indirection registers:
 * select @devad, latch @addr, switch to no-post-increment data mode, then
 * write @val through MII_TG3_MMD_ADDRESS.
 * NOTE(review): truncated — "int err;", the "if (err) goto done;" ladder,
 * the "done:" label with "return err;", and braces are missing from this
 * copy; restore from upstream tg3.c.
 */
1045 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1049 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1053 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1057 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1058 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1062 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
/* Clause-45 style read — mirror of tg3_phy_cl45_write(); the final step
 * reads the selected MMD register into @val.
 * NOTE(review): truncated the same way as tg3_phy_cl45_write() (error
 * ladder, done label, braces); restore from upstream tg3.c.
 */
1068 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1072 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1076 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1080 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1081 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1085 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
/* Read DSP register @reg: select it via DSP_ADDRESS, then read the RW
 * port.  NOTE(review): truncated — "int err;", the "if (!err)" guard on
 * the second call, "return err;", and braces are missing from this copy.
 */
1091 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1095 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1097 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
/* Write DSP register @reg — mirror of tg3_phydsp_read().
 * NOTE(review): same truncation as tg3_phydsp_read() (guard, return,
 * braces missing); restore from upstream tg3.c.
 */
1102 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1106 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1108 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
/* Read an auxiliary-control shadow register: write the shadow selector
 * with the MISC read-select bits, then read AUX_CTRL back into @val.
 * NOTE(review): truncated — "int err;", the "if (!err)" guard,
 * "return err;", and braces are missing from this copy.
 */
1113 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1117 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1118 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1119 MII_TG3_AUXCTL_SHDWSEL_MISC);
1121 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1126 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1128 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1129 set |= MII_TG3_AUXCTL_MISC_WREN;
1131 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
/* Enable the PHY shadow-DSP access path (SMDSP) with 6 dB TX coding. */
1134 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1135 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1136 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1137 MII_TG3_AUXCTL_ACTL_TX_6DB)
/* Disable the PHY shadow-DSP access path, keeping 6 dB TX coding.
 * No trailing semicolon in the expansion: the caller supplies it, so the
 * macro stays safe inside if/else bodies (the original definition ended
 * in ";", which turns "if (x) SMDSP_DISABLE(tp); else ..." into a syntax
 * error and silently inserts empty statements elsewhere).
 */
#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
/* Reset the PHY by setting BMCR_RESET and polling MII_BMCR until the bit
 * self-clears or the poll budget is exhausted.
 * NOTE(review): truncated — variable declarations, the poll-loop
 * head/udelay, error returns, and braces are missing from this copy;
 * restore from upstream tg3.c.
 */
1143 static int tg3_bmcr_reset(struct tg3 *tp)
1148 /* OK, reset it, and poll the BMCR_RESET bit until it
1149 * clears or we time out.
1151 phy_control = BMCR_RESET;
1152 err = tg3_writephy(tp, MII_BMCR, phy_control);
1158 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1162 if ((phy_control & BMCR_RESET) == 0) {
/* mii_bus read hook: delegate to tg3_readphy() under tp->lock (bottom-half
 * safe), returning the register value or a negative error.
 * NOTE(review): truncated — "u32 val;", the error-path assignment, the
 * final return, and braces are missing from this copy.
 */
1174 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1176 struct tg3 *tp = bp->priv;
1179 spin_lock_bh(&tp->lock);
1181 if (tg3_readphy(tp, reg, &val))
1184 spin_unlock_bh(&tp->lock);
/* mii_bus write hook: delegate to tg3_writephy() under tp->lock, returning
 * 0 on success or a negative error.
 * NOTE(review): same truncation pattern as tg3_mdio_read() (result
 * variable, returns, braces missing); restore from upstream tg3.c.
 */
1189 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1191 struct tg3 *tp = bp->priv;
1194 spin_lock_bh(&tp->lock);
1196 if (tg3_writephy(tp, reg, val))
1199 spin_unlock_bh(&tp->lock);
1204 static int tg3_mdio_reset(struct mii_bus *bp)
/* Configure the 5785 MAC<->PHY interface registers (MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE) according to the attached PHY model and the
 * RGMII in-band/out-of-band status flags.  Non-RGMII PHYs take the
 * short path that only sets LED modes and clock timeouts.
 */
1209 static void tg3_mdio_config_5785(struct tg3 *tp)
1212 struct phy_device *phydev;
1214 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
/* Pick the LED mode matching the attached PHY model. */
1215 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1216 case PHY_ID_BCM50610:
1217 case PHY_ID_BCM50610M:
1218 val = MAC_PHYCFG2_50610_LED_MODES;
1220 case PHY_ID_BCMAC131:
1221 val = MAC_PHYCFG2_AC131_LED_MODES;
1223 case PHY_ID_RTL8211C:
1224 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1226 case PHY_ID_RTL8201E:
1227 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
/* Non-RGMII: write LED modes plus RX/TX clock timeouts and return. */
1233 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1234 tw32(MAC_PHYCFG2, val);
1236 val = tr32(MAC_PHYCFG1);
1237 val &= ~(MAC_PHYCFG1_RGMII_INT |
1238 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1239 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1240 tw32(MAC_PHYCFG1, val);
/* RGMII: optionally enable in-band status signalling. */
1245 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1246 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1247 MAC_PHYCFG2_FMODE_MASK_MASK |
1248 MAC_PHYCFG2_GMODE_MASK_MASK |
1249 MAC_PHYCFG2_ACT_MASK_MASK |
1250 MAC_PHYCFG2_QUAL_MASK_MASK |
1251 MAC_PHYCFG2_INBAND_ENABLE;
1253 tw32(MAC_PHYCFG2, val);
1255 val = tr32(MAC_PHYCFG1);
1256 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1257 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1258 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1259 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1260 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1261 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1262 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1264 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1265 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1266 tw32(MAC_PHYCFG1, val);
/* Mirror the in-band configuration into the extended RGMII mode reg. */
1268 val = tr32(MAC_EXT_RGMII_MODE);
1269 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1270 MAC_RGMII_MODE_RX_QUALITY |
1271 MAC_RGMII_MODE_RX_ACTIVITY |
1272 MAC_RGMII_MODE_RX_ENG_DET |
1273 MAC_RGMII_MODE_TX_ENABLE |
1274 MAC_RGMII_MODE_TX_LOWPWR |
1275 MAC_RGMII_MODE_TX_RESET);
1276 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1277 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1278 val |= MAC_RGMII_MODE_RX_INT_B |
1279 MAC_RGMII_MODE_RX_QUALITY |
1280 MAC_RGMII_MODE_RX_ACTIVITY |
1281 MAC_RGMII_MODE_RX_ENG_DET;
1282 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1283 val |= MAC_RGMII_MODE_TX_ENABLE |
1284 MAC_RGMII_MODE_TX_LOWPWR |
1285 MAC_RGMII_MODE_TX_RESET;
1287 tw32(MAC_EXT_RGMII_MODE, val);
/* Stop the MAC's MI auto-polling (required before host-driven MDIO
 * access) and, on an already-initialized 5785 bus, re-apply the
 * PHY-specific MAC interface configuration.
 */
1290 static void tg3_mdio_start(struct tg3 *tp)
1292 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1293 tw32_f(MAC_MI_MODE, tp->mi_mode);
1296 if (tg3_flag(tp, MDIOBUS_INITED) &&
1297 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1298 tg3_mdio_config_5785(tp);
/* Determine the PHY address (per-function on 5717+, with serdes
 * detection), then allocate, populate and register the phylib mdio
 * bus when USE_PHYLIB is set, and apply PHY-model-specific dev_flags.
 *
 * Fix: the "®" sequences were HTML-entity mojibake of "&reg" —
 * restored the address-of expression so the BMCR probe compiles.
 */
1301 static int tg3_mdio_init(struct tg3 *tp)
1305 struct phy_device *phydev;
1307 if (tg3_flag(tp, 5717_PLUS)) {
1310 tp->phy_addr = tp->pci_fn + 1;
1312 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1313 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1315 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1316 TG3_CPMU_PHY_STRAP_IS_SERDES;
1320 tp->phy_addr = TG3_PHY_MII_ADDR;
1324 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1327 tp->mdio_bus = mdiobus_alloc();
1328 if (tp->mdio_bus == NULL)
1331 tp->mdio_bus->name = "tg3 mdio bus";
1332 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1333 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1334 tp->mdio_bus->priv = tp;
1335 tp->mdio_bus->parent = &tp->pdev->dev;
1336 tp->mdio_bus->read = &tg3_mdio_read;
1337 tp->mdio_bus->write = &tg3_mdio_write;
1338 tp->mdio_bus->reset = &tg3_mdio_reset;
1339 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1340 tp->mdio_bus->irq = &tp->mdio_irq[0];
1342 for (i = 0; i < PHY_MAX_ADDR; i++)
1343 tp->mdio_bus->irq[i] = PHY_POLL;
1345 /* The bus registration will look for all the PHYs on the mdio bus.
1346 * Unfortunately, it does not ensure the PHY is powered up before
1347 * accessing the PHY ID registers. A chip reset is the
1348 * quickest way to bring the device back to an operational state..
1350 if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1353 i = mdiobus_register(tp->mdio_bus);
1355 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1356 mdiobus_free(tp->mdio_bus);
1360 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1362 if (!phydev || !phydev->drv) {
1363 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1364 mdiobus_unregister(tp->mdio_bus);
1365 mdiobus_free(tp->mdio_bus);
/* Per-PHY-model interface type and workaround flags. */
1369 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1370 case PHY_ID_BCM57780:
1371 phydev->interface = PHY_INTERFACE_MODE_GMII;
1372 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1374 case PHY_ID_BCM50610:
1375 case PHY_ID_BCM50610M:
1376 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1377 PHY_BRCM_RX_REFCLK_UNUSED |
1378 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1379 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1380 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1381 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1382 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1383 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1384 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1385 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1387 case PHY_ID_RTL8211C:
1388 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1390 case PHY_ID_RTL8201E:
1391 case PHY_ID_BCMAC131:
1392 phydev->interface = PHY_INTERFACE_MODE_MII;
1393 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1394 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1398 tg3_flag_set(tp, MDIOBUS_INITED);
1400 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1401 tg3_mdio_config_5785(tp);
/* Tear down the mdio bus created by tg3_mdio_init(); clearing the
 * flag first keeps the teardown idempotent.
 */
1406 static void tg3_mdio_fini(struct tg3 *tp)
1408 if (tg3_flag(tp, MDIOBUS_INITED)) {
1409 tg3_flag_clear(tp, MDIOBUS_INITED);
1410 mdiobus_unregister(tp->mdio_bus);
1411 mdiobus_free(tp->mdio_bus);
1415 /* tp->lock is held. */
/* Raise the driver-event bit toward the firmware's RX CPU and record
 * the time, which tg3_wait_for_event_ack() uses to bound its wait.
 */
1416 static inline void tg3_generate_fw_event(struct tg3 *tp)
1420 val = tr32(GRC_RX_CPU_EVENT);
1421 val |= GRC_RX_CPU_DRIVER_EVENT;
1422 tw32_f(GRC_RX_CPU_EVENT, val);
1424 tp->last_event_jiffies = jiffies;
/* Maximum time the firmware gets to ACK a driver event, in usec. */
1427 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1429 /* tp->lock is held. */
/* Poll until the firmware clears GRC_RX_CPU_DRIVER_EVENT (the ACK of
 * the previous tg3_generate_fw_event()), shortening or skipping the
 * wait based on how long ago that event was raised.
 */
1430 static void tg3_wait_for_event_ack(struct tg3 *tp)
1433 unsigned int delay_cnt;
1436 /* If enough time has passed, no wait is necessary. */
1437 time_remain = (long)(tp->last_event_jiffies + 1 +
1438 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1440 if (time_remain < 0)
1443 /* Check if we can shorten the wait time. */
1444 delay_cnt = jiffies_to_usecs(time_remain);
1445 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1446 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
/* Poll in ~8 usec steps; +1 guarantees at least one iteration. */
1447 delay_cnt = (delay_cnt >> 3) + 1;
1449 for (i = 0; i < delay_cnt; i++) {
1450 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1456 /* tp->lock is held. */
/* Collect MII link registers (BMCR/BMSR, ADVERTISE/LPA, 1000T pair
 * for non-MII-serdes PHYs, PHYADDR) into the four-word @data buffer
 * that tg3_ump_link_report() hands to the firmware.
 *
 * Fix: the "®" sequences were HTML-entity mojibake of "&reg" —
 * restored the address-of arguments so the reads compile.
 */
1457 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1462 if (!tg3_readphy(tp, MII_BMCR, &reg))
1464 if (!tg3_readphy(tp, MII_BMSR, &reg))
1465 val |= (reg & 0xffff);
1469 if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1471 if (!tg3_readphy(tp, MII_LPA, &reg))
1472 val |= (reg & 0xffff);
1476 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1477 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1479 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1480 val |= (reg & 0xffff);
1484 if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1491 /* tp->lock is held. */
/* Report a link change to ASF firmware on 5780-class parts: gather
 * the MII snapshot, wait for the previous event ACK, write command,
 * length (14 bytes) and data into NIC SRAM mailboxes, then signal.
 */
1492 static void tg3_ump_link_report(struct tg3 *tp)
1496 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1499 tg3_phy_gather_ump_data(tp, data);
1501 tg3_wait_for_event_ack(tp);
1503 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1504 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1505 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1506 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1507 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1508 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1510 tg3_generate_fw_event(tp);
1513 /* tp->lock is held. */
/* Ask ASF firmware to pause (skipped when the APE manages it):
 * wait for the prior event's ACK, post FWCMD_NICDRV_PAUSE_FW,
 * signal it, then wait for this event's ACK too.
 */
1514 static void tg3_stop_fw(struct tg3 *tp)
1516 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1517 /* Wait for RX cpu to ACK the previous event. */
1518 tg3_wait_for_event_ack(tp);
1520 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1522 tg3_generate_fw_event(tp);
1524 /* Wait for RX cpu to ACK this event. */
1525 tg3_wait_for_event_ack(tp);
1529 /* tp->lock is held. */
/* Before a chip reset: write the firmware magic into the mailbox,
 * record the driver state for the reset @kind when the new-style ASF
 * handshake is in use, and notify the APE for INIT/SUSPEND resets.
 */
1530 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1532 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1533 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1535 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1537 case RESET_KIND_INIT:
1538 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1542 case RESET_KIND_SHUTDOWN:
1543 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1547 case RESET_KIND_SUSPEND:
1548 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1557 if (kind == RESET_KIND_INIT ||
1558 kind == RESET_KIND_SUSPEND)
1559 tg3_ape_driver_state_change(tp, kind);
1562 /* tp->lock is held. */
/* After a chip reset: record the "done" driver state for @kind under
 * the new ASF handshake, and notify the APE on shutdown.
 */
1563 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1565 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1567 case RESET_KIND_INIT:
1568 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1569 DRV_STATE_START_DONE);
1572 case RESET_KIND_SHUTDOWN:
1573 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1574 DRV_STATE_UNLOAD_DONE);
1582 if (kind == RESET_KIND_SHUTDOWN)
1583 tg3_ape_driver_state_change(tp, kind);
1586 /* tp->lock is held. */
/* Legacy (pre-new-handshake) ASF state signalling: write the driver
 * state for @kind into the state mailbox when ASF is enabled.
 */
1587 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1589 if (tg3_flag(tp, ENABLE_ASF)) {
1591 case RESET_KIND_INIT:
1592 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1596 case RESET_KIND_SHUTDOWN:
1597 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1601 case RESET_KIND_SUSPEND:
1602 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
/* Wait for on-chip firmware to finish booting after reset.  5906 uses
 * the VCPU status bit; others poll the firmware mailbox for the
 * inverted magic value.  Absence of firmware is legal (some Sun
 * boards) and is logged once, not treated as an error.
 */
1612 static int tg3_poll_fw(struct tg3 *tp)
1617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1618 /* Wait up to 20ms for init done. */
1619 for (i = 0; i < 200; i++) {
1620 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1627 /* Wait for firmware initialization to complete. */
1628 for (i = 0; i < 100000; i++) {
1629 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1630 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1635 /* Chip might not be fitted with firmware. Some Sun onboard
1636 * parts are configured like that. So don't signal the timeout
1637 * of the above loop as an error, but do report the lack of
1638 * running firmware once.
1640 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1641 tg3_flag_set(tp, NO_FWARE_REPORTED);
1643 netdev_info(tp->dev, "No firmware running\n");
1646 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1647 /* The 57765 A0 needs a little more
1648 * time to do some important work.
/* Log the current link state (speed/duplex/flow-control, plus EEE
 * state for EEE-capable PHYs) and forward it to ASF firmware via
 * tg3_ump_link_report().
 */
1656 static void tg3_link_report(struct tg3 *tp)
1658 if (!netif_carrier_ok(tp->dev)) {
1659 netif_info(tp, link, tp->dev, "Link is down\n");
1660 tg3_ump_link_report(tp);
1661 } else if (netif_msg_link(tp)) {
1662 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1663 (tp->link_config.active_speed == SPEED_1000 ?
1665 (tp->link_config.active_speed == SPEED_100 ?
1667 (tp->link_config.active_duplex == DUPLEX_FULL ?
1670 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1671 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1673 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1676 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1677 netdev_info(tp->dev, "EEE is %s\n",
1678 tp->setlpicnt ? "enabled" : "disabled");
1680 tg3_ump_link_report(tp);
/* Map FLOW_CTRL_TX/RX bits to the 1000BASE-X pause advertisement
 * bits (IEEE 802.3 symmetric/asymmetric pause encoding).
 */
1684 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1688 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1689 miireg = ADVERTISE_1000XPAUSE;
1690 else if (flow_ctrl & FLOW_CTRL_TX)
1691 miireg = ADVERTISE_1000XPSE_ASYM;
1692 else if (flow_ctrl & FLOW_CTRL_RX)
1693 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
/* Resolve negotiated 1000BASE-X pause capability from local and
 * remote advertisements: symmetric pause on both sides enables both
 * directions; otherwise the asymmetric-pause rules apply.
 */
1700 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1704 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1705 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1706 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1707 if (lcladv & ADVERTISE_1000XPAUSE)
1709 if (rmtadv & ADVERTISE_1000XPAUSE)
/* Apply the negotiated (or forced) flow-control configuration to the
 * MAC RX/TX mode registers.  With autoneg+PAUSE_AUTONEG the result is
 * resolved from @lcladv/@rmtadv (1000X or copper rules); otherwise the
 * configured tp->link_config.flowctrl is used.  Registers are only
 * rewritten when the mode actually changed.
 */
1716 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1720 u32 old_rx_mode = tp->rx_mode;
1721 u32 old_tx_mode = tp->tx_mode;
1723 if (tg3_flag(tp, USE_PHYLIB))
1724 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1726 autoneg = tp->link_config.autoneg;
1728 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1729 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1730 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1732 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1734 flowctrl = tp->link_config.flowctrl;
1736 tp->link_config.active_flowctrl = flowctrl;
1738 if (flowctrl & FLOW_CTRL_RX)
1739 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1741 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1743 if (old_rx_mode != tp->rx_mode)
1744 tw32_f(MAC_RX_MODE, tp->rx_mode);
1746 if (flowctrl & FLOW_CTRL_TX)
1747 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1749 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1751 if (old_tx_mode != tp->tx_mode)
1752 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* phylib link-change callback: under tp->lock, derive the MAC port
 * mode / duplex from the phydev state, resolve flow control, update
 * MI status and TX length timings, and remember the new link state.
 * tg3_link_report() is called outside the lock when anything changed.
 */
1755 static void tg3_adjust_link(struct net_device *dev)
1757 u8 oldflowctrl, linkmesg = 0;
1758 u32 mac_mode, lcl_adv, rmt_adv;
1759 struct tg3 *tp = netdev_priv(dev);
1760 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1762 spin_lock_bh(&tp->lock);
1764 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1765 MAC_MODE_HALF_DUPLEX);
1767 oldflowctrl = tp->link_config.active_flowctrl;
/* Select MII vs GMII port mode from the PHY-reported speed. */
1773 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1774 mac_mode |= MAC_MODE_PORT_MODE_MII;
1775 else if (phydev->speed == SPEED_1000 ||
1776 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1777 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1779 mac_mode |= MAC_MODE_PORT_MODE_MII;
1781 if (phydev->duplex == DUPLEX_HALF)
1782 mac_mode |= MAC_MODE_HALF_DUPLEX;
1784 lcl_adv = mii_advertise_flowctrl(
1785 tp->link_config.flowctrl);
1788 rmt_adv = LPA_PAUSE_CAP;
1789 if (phydev->asym_pause)
1790 rmt_adv |= LPA_PAUSE_ASYM;
1793 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1795 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1797 if (mac_mode != tp->mac_mode) {
1798 tp->mac_mode = mac_mode;
1799 tw32_f(MAC_MODE, tp->mac_mode);
1803 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1804 if (phydev->speed == SPEED_10)
1806 MAC_MI_STAT_10MBPS_MODE |
1807 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1809 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
/* 1000HD needs a longer slot time in MAC_TX_LENGTHS. */
1812 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1813 tw32(MAC_TX_LENGTHS,
1814 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1815 (6 << TX_LENGTHS_IPG_SHIFT) |
1816 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1818 tw32(MAC_TX_LENGTHS,
1819 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1820 (6 << TX_LENGTHS_IPG_SHIFT) |
1821 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1823 if (phydev->link != tp->old_link ||
1824 phydev->speed != tp->link_config.active_speed ||
1825 phydev->duplex != tp->link_config.active_duplex ||
1826 oldflowctrl != tp->link_config.active_flowctrl)
1829 tp->old_link = phydev->link;
1830 tp->link_config.active_speed = phydev->speed;
1831 tp->link_config.active_duplex = phydev->duplex;
1833 spin_unlock_bh(&tp->lock);
1836 tg3_link_report(tp);
/* Connect the MAC to its phylib PHY: attach tg3_adjust_link() as the
 * link handler, then mask phydev->supported down to what the MAC can
 * do for the detected interface mode.  Idempotent via IS_CONNECTED.
 * Returns 0 or a phy_connect() error.
 */
1839 static int tg3_phy_init(struct tg3 *tp)
1841 struct phy_device *phydev;
1843 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1846 /* Bring the PHY back to a known state. */
1849 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1851 /* Attach the MAC to the PHY. */
1852 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1853 phydev->dev_flags, phydev->interface);
1854 if (IS_ERR(phydev)) {
1855 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1856 return PTR_ERR(phydev);
1859 /* Mask with MAC supported features. */
1860 switch (phydev->interface) {
1861 case PHY_INTERFACE_MODE_GMII:
1862 case PHY_INTERFACE_MODE_RGMII:
1863 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1864 phydev->supported &= (PHY_GBIT_FEATURES |
1866 SUPPORTED_Asym_Pause);
1870 case PHY_INTERFACE_MODE_MII:
1871 phydev->supported &= (PHY_BASIC_FEATURES |
1873 SUPPORTED_Asym_Pause);
/* Unsupported interface mode: undo the connect. */
1876 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1880 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1882 phydev->advertising = phydev->supported;
/* (Re)start the connected PHY.  Coming out of low-power mode the
 * saved link_config (speed/duplex/autoneg/advertising) is restored
 * onto the phydev first, then autonegotiation is kicked off.
 */
1887 static void tg3_phy_start(struct tg3 *tp)
1889 struct phy_device *phydev;
1891 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1894 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1896 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1897 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1898 phydev->speed = tp->link_config.speed;
1899 phydev->duplex = tp->link_config.duplex;
1900 phydev->autoneg = tp->link_config.autoneg;
1901 phydev->advertising = tp->link_config.advertising;
1906 phy_start_aneg(phydev);
/* Stop the phylib state machine for the connected PHY; no-op if the
 * PHY was never connected.
 */
1909 static void tg3_phy_stop(struct tg3 *tp)
1911 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1914 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
/* Disconnect from the phylib PHY and clear the connected flag;
 * counterpart of tg3_phy_init().
 */
1917 static void tg3_phy_fini(struct tg3 *tp)
1919 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1920 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1921 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
/* Enable external loopback via the AUXCTL register.  Skipped on FET
 * PHYs; the 5401 cannot be read-modify-written, so it gets a direct
 * write of the loopback + 6dB bits, others do RMW.  Returns 0 or a
 * PHY access error.
 */
1925 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1930 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1933 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1934 /* Cannot do read-modify-write on 5401 */
1935 err = tg3_phy_auxctl_write(tp,
1936 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1937 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1942 err = tg3_phy_auxctl_read(tp,
1943 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1947 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1948 err = tg3_phy_auxctl_write(tp,
1949 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
/* FET-PHY variant of auto-power-down toggling: open the shadow
 * register window via MII_TG3_FET_TEST, flip the APD bit in
 * AUXSTAT2, then restore the original test register.
 */
1955 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1959 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1962 tg3_writephy(tp, MII_TG3_FET_TEST,
1963 phytest | MII_TG3_FET_SHADOW_EN);
1964 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1966 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1968 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1969 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1971 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
/* Toggle PHY auto-power-down.  Only applies to 5705+ parts (and not
 * MII-serdes 5717+); FET PHYs use the shadow-register variant.  For
 * others, program the SCR5 power-saving selector (DLLAPD withheld on
 * 5784 when enabling) and then the APD wake timer / enable bits.
 */
1975 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1979 if (!tg3_flag(tp, 5705_PLUS) ||
1980 (tg3_flag(tp, 5717_PLUS) &&
1981 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1984 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1985 tg3_phy_fet_toggle_apd(tp, enable);
1989 reg = MII_TG3_MISC_SHDW_WREN |
1990 MII_TG3_MISC_SHDW_SCR5_SEL |
1991 MII_TG3_MISC_SHDW_SCR5_LPED |
1992 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1993 MII_TG3_MISC_SHDW_SCR5_SDTL |
1994 MII_TG3_MISC_SHDW_SCR5_C125OE;
1995 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1996 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1998 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2001 reg = MII_TG3_MISC_SHDW_WREN |
2002 MII_TG3_MISC_SHDW_APD_SEL |
2003 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2005 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2007 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
/* Toggle automatic MDI crossover.  5705+ copper only (serdes
 * excluded).  FET PHYs flip the MISCCTRL shadow bit through the
 * FET_TEST window; others RMW the AUXCTL MISC force-AMDIX bit.
 */
2010 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2014 if (!tg3_flag(tp, 5705_PLUS) ||
2015 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2018 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2021 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2022 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2024 tg3_writephy(tp, MII_TG3_FET_TEST,
2025 ephy | MII_TG3_FET_SHADOW_EN);
2026 if (!tg3_readphy(tp, reg, &phy)) {
2028 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2030 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2031 tg3_writephy(tp, reg, phy);
2033 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2038 ret = tg3_phy_auxctl_read(tp,
2039 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2042 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2044 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2045 tg3_phy_auxctl_write(tp,
2046 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
/* Enable the ethernet@wirespeed feature (link at reduced speed over
 * marginal cabling) via the AUXCTL MISC shadow, unless the PHY flag
 * says wirespeed is unsupported.
 */
2051 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2056 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2059 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2061 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2062 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
/* Program factory OTP calibration values (AGC target, HPF/LPF,
 * VDAC, 10BT amplitude, offsets) into the PHY DSP registers.  The
 * SM DSP clock must be enabled around the writes.
 */
2065 static void tg3_phy_apply_otp(struct tg3 *tp)
2074 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2077 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2078 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2079 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2081 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2082 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2083 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2085 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2086 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2087 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2089 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2090 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2092 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2093 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2095 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2096 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2097 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2099 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
/* Adjust Energy-Efficient-Ethernet state after a link change.  On a
 * 100/1000 full-duplex autoneg link the LPI exit timer is set and the
 * CL45 EEE resolution status is read; if LPI ended up unused, the DSP
 * TAP26 is cleared and the CPMU LPI enable bit is dropped.
 */
2102 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2106 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2111 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2112 current_link_up == 1 &&
2113 tp->link_config.active_duplex == DUPLEX_FULL &&
2114 (tp->link_config.active_speed == SPEED_100 ||
2115 tp->link_config.active_speed == SPEED_1000)) {
2118 if (tp->link_config.active_speed == SPEED_1000)
2119 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2121 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2123 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2125 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2126 TG3_CL45_D7_EEERES_STAT, &val);
2128 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2129 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2133 if (!tp->setlpicnt) {
2134 if (current_link_up == 1 &&
2135 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2136 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2137 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2140 val = tr32(TG3_CPMU_EEE_MODE);
2141 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
/* Enable EEE low-power-idle: on gigabit links of 5717/5719/57765-
 * class parts, program the DSP TAP26 workaround bits (under SM DSP
 * clock), then set the CPMU LPI enable bit.
 */
2145 static void tg3_phy_eee_enable(struct tg3 *tp)
2149 if (tp->link_config.active_speed == SPEED_1000 &&
2150 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2151 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2152 tg3_flag(tp, 57765_CLASS)) &&
2153 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2154 val = MII_TG3_DSP_TAP26_ALNOKO |
2155 MII_TG3_DSP_TAP26_RMRXSTO;
2156 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2157 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2160 val = tr32(TG3_CPMU_EEE_MODE);
2161 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
/* Poll DSP_CONTROL until the macro-busy bit (0x1000) clears; loop
 * bounds and return values are not visible in this extract.
 */
2164 static int tg3_wait_macro_done(struct tg3 *tp)
2171 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2172 if ((tmp32 & 0x1000) == 0)
/* Write a fixed test pattern into each of the 4 DSP channels, read
 * it back, and verify it.  On mismatch, issue the 0x000b/0x4001/
 * 0x4005 recovery writes and request another PHY reset via *resetp.
 * Part of the 5703/4/5 PHY reset workaround.
 */
2182 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2184 static const u32 test_pat[4][6] = {
2185 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2186 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2187 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2188 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2192 for (chan = 0; chan < 4; chan++) {
/* Select channel block and enter write mode. */
2195 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2196 (chan * 0x2000) | 0x0200);
2197 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2199 for (i = 0; i < 6; i++)
2200 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2203 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2204 if (tg3_wait_macro_done(tp)) {
/* Re-select the channel and read the pattern back. */
2209 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2210 (chan * 0x2000) | 0x0200);
2211 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2212 if (tg3_wait_macro_done(tp)) {
2217 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2218 if (tg3_wait_macro_done(tp)) {
2223 for (i = 0; i < 6; i += 2) {
2226 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2227 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2228 tg3_wait_macro_done(tp)) {
2234 if (low != test_pat[chan][i] ||
2235 high != test_pat[chan][i+1]) {
2236 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2237 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2238 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Zero the test pattern in all 4 DSP channels (companion to
 * tg3_phy_write_and_check_testpat()); fails if the macro-done poll
 * times out on any channel.
 */
2248 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2252 for (chan = 0; chan < 4; chan++) {
2255 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2256 (chan * 0x2000) | 0x0200);
2257 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2258 for (i = 0; i < 6; i++)
2259 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2260 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2261 if (tg3_wait_macro_done(tp))
/* 5703/5704/5705 PHY reset workaround: force 1000FD master mode,
 * run the DSP test-pattern write/verify loop (retrying with a BMCR
 * reset while it fails), clear the channel pattern, then restore
 * CTRL1000 and re-enable the transmitter/interrupt.
 *
 * Fix: the "®32" sequences were HTML-entity mojibake of "&reg32" —
 * restored the address-of arguments so the reads compile.
 */
2268 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2270 u32 reg32, phy9_orig;
2271 int retries, do_phy_reset, err;
2277 err = tg3_bmcr_reset(tp);
2283 /* Disable transmitter and interrupt. */
2284 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2288 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2290 /* Set full-duplex, 1000 mbps. */
2291 tg3_writephy(tp, MII_BMCR,
2292 BMCR_FULLDPLX | BMCR_SPEED1000);
2294 /* Set to master mode. */
2295 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2298 tg3_writephy(tp, MII_CTRL1000,
2299 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2301 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2305 /* Block the PHY control access. */
2306 tg3_phydsp_write(tp, 0x8005, 0x0800);
2308 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2311 } while (--retries);
2313 err = tg3_phy_reset_chanpat(tp);
2317 tg3_phydsp_write(tp, 0x8005, 0x0000);
2319 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2320 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2322 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2324 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2326 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2328 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2335 /* This will reset the tigon3 PHY if there is no valid
2336 * link unless the FORCE argument is non-zero.
/* Full PHY reset with all chip-revision workarounds: 5906 EPHY IDDQ
 * exit, 5703/4/5 DSP workaround, 5784 CPMU 10MB-RX quirk, MAC clock
 * fixup on 5784/5761 AX, OTP calibration, APD, ADC/BER/jitter bug
 * workarounds, jumbo-frame bits, and finally auto-MDIX + wirespeed.
 */
2338 static int tg3_phy_reset(struct tg3 *tp)
2343 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2344 val = tr32(GRC_MISC_CFG);
2345 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
/* Double BMSR read: the first read returns latched status. */
2348 err = tg3_readphy(tp, MII_BMSR, &val);
2349 err |= tg3_readphy(tp, MII_BMSR, &val);
2353 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2354 netif_carrier_off(tp->dev);
2355 tg3_link_report(tp);
2358 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2359 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2360 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2361 err = tg3_phy_reset_5703_4_5(tp);
/* Temporarily drop the 10MB-RXONLY CPMU quirk around the reset. */
2368 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2369 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2370 cpmuctrl = tr32(TG3_CPMU_CTRL);
2371 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2373 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2376 err = tg3_bmcr_reset(tp);
2380 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2381 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2382 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2384 tw32(TG3_CPMU_CTRL, cpmuctrl);
2387 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2388 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2389 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2390 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2391 CPMU_LSPD_1000MB_MACCLK_12_5) {
2392 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2394 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2398 if (tg3_flag(tp, 5717_PLUS) &&
2399 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2402 tg3_phy_apply_otp(tp);
2404 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2405 tg3_phy_toggle_apd(tp, true);
2407 tg3_phy_toggle_apd(tp, false);
2410 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2411 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2412 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2413 tg3_phydsp_write(tp, 0x000a, 0x0323);
2414 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2417 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2418 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2419 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2422 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2423 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2424 tg3_phydsp_write(tp, 0x000a, 0x310b);
2425 tg3_phydsp_write(tp, 0x201f, 0x9506);
2426 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2427 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2429 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2430 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2431 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2432 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2433 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2434 tg3_writephy(tp, MII_TG3_TEST1,
2435 MII_TG3_TEST1_TRIM_EN | 0x4);
2437 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2439 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2443 /* Set Extended packet length bit (bit 14) on all chips that */
2444 /* support jumbo frames */
2445 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2446 /* Cannot do read-modify-write on 5401 */
2447 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2448 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2449 /* Set bit 14 with read-modify-write to preserve other bits */
2450 err = tg3_phy_auxctl_read(tp,
2451 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2453 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2454 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2457 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2458 * jumbo frames transmission.
2460 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2461 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2462 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2463 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2466 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2467 /* adjust output voltage */
2468 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2471 tg3_phy_toggle_automdix(tp, 1);
2472 tg3_phy_set_wirespeed(tp);
/* Per-function GPIO handshake messages: each PCI function owns a
 * 4-bit nibble (hence the <<0/<<4/<<8/<<12 replication in the
 * ALL_* masks) carrying "driver present" and "needs Vaux" bits.
 */
2476 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2477 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2478 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2479 TG3_GPIO_MSG_NEED_VAUX)
2480 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2481 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2482 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2483 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2484 (TG3_GPIO_MSG_DRVR_PRES << 12))
2486 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2487 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2488 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2489 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2490 (TG3_GPIO_MSG_NEED_VAUX << 12))
/* Replace this PCI function's nibble in the shared GPIO status word
 * with @newstat.  5717/5719 keep the word in an APE register, other
 * chips in TG3_CPMU_DRV_STATUS.  Returns the combined status of all
 * functions (shifted back down).
 */
2492 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2496 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2497 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2498 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2500 status = tr32(TG3_CPMU_DRV_STATUS);
2502 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2503 status &= ~(TG3_GPIO_MSG_MASK << shift);
2504 status |= (newstat << shift);
2506 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2508 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2510 tw32(TG3_CPMU_DRV_STATUS, status);
2512 return status >> TG3_APE_GPIO_MSG_SHIFT;
/* Switch the NIC power source to Vmain.  On 5717/5719/5720 the
 * multi-function GPIO handshake (under the APE GPIO lock) announces
 * this function as present before flipping GRC_LOCAL_CTRL; other
 * NICs just write GRC_LOCAL_CTRL with the settle delay.
 */
2515 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2517 if (!tg3_flag(tp, IS_NIC))
2520 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2521 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2522 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2523 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2526 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES)
2528 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2529 TG3_GRC_LCLCTL_PWRSW_DELAY);
2531 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2533 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2534 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* Leave the device powered from Vmain on the way down by pulsing
 * GPIO1 (output high, low, high) with the power-switch settle delay.
 * Not applicable to non-NIC boards or 5700/5701.
 */
2540 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2544 if (!tg3_flag(tp, IS_NIC) ||
2545 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2546 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2549 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2551 tw32_wait_f(GRC_LOCAL_CTRL,
2552 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2553 TG3_GRC_LCLCTL_PWRSW_DELAY);
2555 tw32_wait_f(GRC_LOCAL_CTRL,
2557 TG3_GRC_LCLCTL_PWRSW_DELAY);
2559 tw32_wait_f(GRC_LOCAL_CTRL,
2560 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2561 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* Switch the NIC power source to Vaux via GPIO sequencing.  Three
 * board families need different sequences: 5700/5701, the 5761
 * non-e parts (GPIO 0 and 2 swapped), and everything else — with
 * extra care on 5714 (current limit) and 5753 (no GPIO2).
 */
2564 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2566 if (!tg3_flag(tp, IS_NIC))
2569 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2570 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2571 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2572 (GRC_LCLCTRL_GPIO_OE0 |
2573 GRC_LCLCTRL_GPIO_OE1 |
2574 GRC_LCLCTRL_GPIO_OE2 |
2575 GRC_LCLCTRL_GPIO_OUTPUT0 |
2576 GRC_LCLCTRL_GPIO_OUTPUT1),
2577 TG3_GRC_LCLCTL_PWRSW_DELAY);
2578 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2579 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2580 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2581 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2582 GRC_LCLCTRL_GPIO_OE1 |
2583 GRC_LCLCTRL_GPIO_OE2 |
2584 GRC_LCLCTRL_GPIO_OUTPUT0 |
2585 GRC_LCLCTRL_GPIO_OUTPUT1 |
2587 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2588 TG3_GRC_LCLCTL_PWRSW_DELAY);
2590 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2591 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2592 TG3_GRC_LCLCTL_PWRSW_DELAY);
2594 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2595 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2596 TG3_GRC_LCLCTL_PWRSW_DELAY);
2599 u32 grc_local_ctrl = 0;
2601 /* Workaround to prevent overdrawing Amps. */
2602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2603 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2604 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2606 TG3_GRC_LCLCTL_PWRSW_DELAY);
2609 /* On 5753 and variants, GPIO2 cannot be used. */
2610 no_gpio2 = tp->nic_sram_data_cfg &
2611 NIC_SRAM_DATA_CFG_NO_GPIO2;
2613 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2614 GRC_LCLCTRL_GPIO_OE1 |
2615 GRC_LCLCTRL_GPIO_OE2 |
2616 GRC_LCLCTRL_GPIO_OUTPUT1 |
2617 GRC_LCLCTRL_GPIO_OUTPUT2;
2619 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2620 GRC_LCLCTRL_GPIO_OUTPUT2);
2622 tw32_wait_f(GRC_LOCAL_CTRL,
2623 tp->grc_local_ctrl | grc_local_ctrl,
2624 TG3_GRC_LCLCTL_PWRSW_DELAY);
2626 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2628 tw32_wait_f(GRC_LOCAL_CTRL,
2629 tp->grc_local_ctrl | grc_local_ctrl,
2630 TG3_GRC_LCLCTL_PWRSW_DELAY);
2633 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2634 tw32_wait_f(GRC_LOCAL_CTRL,
2635 tp->grc_local_ctrl | grc_local_ctrl,
2636 TG3_GRC_LCLCTL_PWRSW_DELAY);
/* Multi-port aux-power arbitration for 5717/5719/5720-class chips.
 * Under the APE GPIO lock, advertise this function's Vaux requirement
 * (needed when ASF/APE management or WoL is active), merge it with the
 * other functions' status, and only the designated function actually
 * flips the power source: Vaux if any function needs it, else Vmain.
 */
2641 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2645 	/* Serialize power state transitions */
2646 	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2649 	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2650 		msg = TG3_GPIO_MSG_NEED_VAUX;
2652 	msg = tg3_set_function_status(tp, msg);
	/* If another driver instance is present it owns the switch. */
2654 	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2657 	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2658 		tg3_pwrsrc_switch_to_vaux(tp);
2660 		tg3_pwrsrc_die_with_vmain(tp);
2663 	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
/* Decide whether to run from Vaux or Vmain when the interface goes down.
 * @include_wol: count Wake-on-LAN as a reason to keep Vaux alive.
 * Skips non-NIC and 57765-class devices (different GPIO semantics),
 * delegates to tg3_frob_aux_power_5717() on 5717/5719/5720, and on
 * dual-port boards also honours the peer port's ASF/WoL needs.
 */
2666 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2668 	bool need_vaux = false;
2670 	/* The GPIOs do something completely different on 57765. */
2671 	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2674 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2675 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2676 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2677 		tg3_frob_aux_power_5717(tp, include_wol ?
2678 					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
	/* Dual-port boards: the peer function may still need Vaux. */
2682 	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2683 		struct net_device *dev_peer;
2685 		dev_peer = pci_get_drvdata(tp->pdev_peer);
2687 		/* remove_one() may have been run on the peer. */
2689 			struct tg3 *tp_peer = netdev_priv(dev_peer);
2691 			if (tg3_flag(tp_peer, INIT_COMPLETE))
2694 			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2695 			    tg3_flag(tp_peer, ENABLE_ASF))
2700 	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2701 	    tg3_flag(tp, ENABLE_ASF))
2705 		tg3_pwrsrc_switch_to_vaux(tp);
2707 		tg3_pwrsrc_die_with_vmain(tp);
/* Return whether the 5700's MAC_MODE_LINK_POLARITY bit should be set for
 * the given link @speed, based on the LED mode and PHY type (BCM5411 has
 * inverted polarity except at 10 Mb/s).  Return lines were dropped by the
 * extraction; logic kept byte-identical.
 */
2710 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2712 	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2714 	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2715 		if (speed != SPEED_10)
2717 	} else if (speed == SPEED_10)
/* Power down the PHY as part of device suspend/shutdown.
 * @do_low_power: when true, also program the copper PHY's aux-control
 * power registers for the lowest-power state.
 * Handles serdes (5704 SG_DIG path), the 5906 internal EPHY (IDDQ mode),
 * FET-style PHYs (shadow AUXMODE4 secondary power-down bit), and finally
 * the generic BMCR_PDOWN path — except on chips where powering the PHY
 * down is known to be unsafe (5700, 5704, 5780 w/ MII serdes).
 */
2723 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2727 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2728 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2729 			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2730 			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2733 				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2734 			tw32(SG_DIG_CTRL, sg_dig_ctrl);
2735 			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2740 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: put the embedded PHY into IDDQ (deep power-down). */
2742 		val = tr32(GRC_MISC_CFG);
2743 		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2746 	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2748 		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2751 			tg3_writephy(tp, MII_ADVERTISE, 0);
2752 			tg3_writephy(tp, MII_BMCR,
2753 				     BMCR_ANENABLE | BMCR_ANRESTART);
2755 			tg3_writephy(tp, MII_TG3_FET_TEST,
2756 				     phytest | MII_TG3_FET_SHADOW_EN);
2757 			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2758 				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2760 					    MII_TG3_FET_SHDW_AUXMODE4,
			/* Restore test reg to leave shadow mode. */
2763 			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2766 	} else if (do_low_power) {
2767 		tg3_writephy(tp, MII_TG3_EXT_CTRL,
2768 			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2770 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2771 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2772 		      MII_TG3_AUXCTL_PCTL_VREG_11V;
2773 		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2776 	/* The PHY should not be powered down on some chips because
2779 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2780 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2781 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2782 	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
	/* 5784_AX / 5761_AX need the 1000MB MAC clock slowed first. */
2785 	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2786 	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2787 		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2788 		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2789 		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2790 		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2793 	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2796 /* tp->lock is held. */
/* Acquire the hardware NVRAM software arbitration grant (SWARB_GNT1),
 * polling up to 8000 iterations.  The lock is recursive via
 * tp->nvram_lock_cnt; only the first acquisition touches hardware.
 * On timeout the request is withdrawn (error return line dropped by
 * extraction).  No-op on devices without the NVRAM flag.
 */
2797 static int tg3_nvram_lock(struct tg3 *tp)
2799 	if (tg3_flag(tp, NVRAM)) {
2802 		if (tp->nvram_lock_cnt == 0) {
2803 			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2804 			for (i = 0; i < 8000; i++) {
2805 				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
			/* Timed out: withdraw the arbitration request. */
2810 				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2814 		tp->nvram_lock_cnt++;
2819 /* tp->lock is held. */
/* Release one level of the recursive NVRAM lock; hardware arbitration is
 * only relinquished when the count reaches zero. */
2820 static void tg3_nvram_unlock(struct tg3 *tp)
2822 	if (tg3_flag(tp, NVRAM)) {
2823 		if (tp->nvram_lock_cnt > 0)
2824 			tp->nvram_lock_cnt--;
2825 		if (tp->nvram_lock_cnt == 0)
2826 			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2830 /* tp->lock is held. */
/* Set the NVRAM_ACCESS enable bit on 5750+ chips whose NVRAM is not
 * write-protected by management firmware. */
2831 static void tg3_enable_nvram_access(struct tg3 *tp)
2833 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2834 		u32 nvaccess = tr32(NVRAM_ACCESS);
2836 		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2840 /* tp->lock is held. */
/* Counterpart of tg3_enable_nvram_access(): clear the ACCESS_ENABLE bit
 * under the same chip/flag conditions. */
2841 static void tg3_disable_nvram_access(struct tg3 *tp)
2843 	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2844 		u32 nvaccess = tr32(NVRAM_ACCESS);
2846 		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Read one 32-bit word from a legacy SEEPROM via the GRC EEPROM engine.
 * @offset must be dword-aligned and within EEPROM_ADDR_ADDR_MASK.
 * Kicks off a read through GRC_EEPROM_ADDR, polls (up to 1000 iterations)
 * for EEPROM_ADDR_COMPLETE, then reads GRC_EEPROM_DATA.  The result is
 * byteswapped before being stored in *val (see comment at line 2882).
 */
2850 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2851 				       u32 offset, u32 *val)
2856 	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2859 	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2860 					EEPROM_ADDR_DEVID_MASK |
2862 	tw32(GRC_EEPROM_ADDR,
2864 	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
2865 	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2866 	      EEPROM_ADDR_ADDR_MASK) |
2867 	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
2869 	for (i = 0; i < 1000; i++) {
2870 		tmp = tr32(GRC_EEPROM_ADDR);
2872 		if (tmp & EEPROM_ADDR_COMPLETE)
2876 	if (!(tmp & EEPROM_ADDR_COMPLETE))
2879 	tmp = tr32(GRC_EEPROM_DATA);
2882 	 * The data will always be opposite the native endian
2883 	 * format.  Perform a blind byteswap to compensate.
2890 #define NVRAM_CMD_TIMEOUT 10000
/* Issue @nvram_cmd to the NVRAM controller and poll (up to
 * NVRAM_CMD_TIMEOUT iterations) for NVRAM_CMD_DONE.  Returns -EBUSY-style
 * failure when the loop runs out (exact return line dropped by
 * extraction); i == NVRAM_CMD_TIMEOUT holds iff no break occurred. */
2892 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2896 	tw32(NVRAM_CMD, nvram_cmd);
2897 	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2899 		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2905 	if (i == NVRAM_CMD_TIMEOUT)
/* Translate a logical NVRAM byte offset into the physical address used by
 * Atmel AT45DB0x1B-style buffered flash, whose pages are addressed by a
 * (page << ATMEL_AT45DB0X1B_PAGE_POS) + in-page-offset scheme.  For all
 * other NVRAM types the address is returned unchanged. */
2911 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2913 	if (tg3_flag(tp, NVRAM) &&
2914 	    tg3_flag(tp, NVRAM_BUFFERED) &&
2915 	    tg3_flag(tp, FLASH) &&
2916 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2917 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2919 		addr = ((addr / tp->nvram_pagesize) <<
2920 			ATMEL_AT45DB0X1B_PAGE_POS) +
2921 		       (addr % tp->nvram_pagesize);
/* Inverse of tg3_nvram_phys_addr(): convert an Atmel AT45DB0x1B physical
 * page address back to a flat logical byte offset.  Identity for all
 * other NVRAM types. */
2926 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2928 	if (tg3_flag(tp, NVRAM) &&
2929 	    tg3_flag(tp, NVRAM_BUFFERED) &&
2930 	    tg3_flag(tp, FLASH) &&
2931 	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2932 	    (tp->nvram_jedecnum == JEDEC_ATMEL))
2934 		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2935 			tp->nvram_pagesize) +
2936 		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2941 /* NOTE: Data read in from NVRAM is byteswapped according to
2942  * the byteswapping settings for all other register accesses.
2943  * tg3 devices are BE devices, so on a BE machine, the data
2944  * returned will be exactly as it is seen in NVRAM.  On a LE
2945  * machine, the 32-bit value will be byteswapped.
 *
 * Read one 32-bit word at logical @offset.  Falls back to the SEEPROM
 * engine on non-NVRAM devices; otherwise translates the address, takes
 * the NVRAM arbitration lock, enables access, and issues a single
 * FIRST|LAST read command.  Caller must hold tp->lock.
 */
2947 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2951 	if (!tg3_flag(tp, NVRAM))
2952 		return tg3_nvram_read_using_eeprom(tp, offset, val);
2954 	offset = tg3_nvram_phys_addr(tp, offset);
2956 	if (offset > NVRAM_ADDR_MSK)
2959 	ret = tg3_nvram_lock(tp);
2963 	tg3_enable_nvram_access(tp);
2965 	tw32(NVRAM_ADDR, offset);
2966 	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2967 		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2970 		*val = tr32(NVRAM_RDDATA);
2972 	tg3_disable_nvram_access(tp);
2974 	tg3_nvram_unlock(tp);
2979 /* Ensures NVRAM data is in bytestream format. */
/* Wrapper around tg3_nvram_read() that returns the word as big-endian
 * (__be32), i.e. in the byte order it occupies in the NVRAM itself. */
2980 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2983 	int res = tg3_nvram_read(tp, offset, &v);
2985 		*val = cpu_to_be32(v);
/* Write @len bytes (dword-aligned) from @buf to a legacy SEEPROM, one
 * 32-bit word at a time through the GRC EEPROM engine, polling each write
 * for EEPROM_ADDR_COMPLETE (up to 1000 iterations per word). */
2989 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2990 				   u32 offset, u32 len, u8 *buf)
2995 	for (i = 0; i < len; i += 4) {
3001 		memcpy(&data, buf + i, 4);
3004 		 * The SEEPROM interface expects the data to always be opposite
3005 		 * the native endian format.  We accomplish this by reversing
3006 		 * all the operations that would have been performed on the
3007 		 * data from a call to tg3_nvram_read_be32().
3009 		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3011 		val = tr32(GRC_EEPROM_ADDR);
		/* Ack any stale completion before starting this write. */
3012 		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3014 		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3016 		tw32(GRC_EEPROM_ADDR, val |
3017 			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3018 			(addr & EEPROM_ADDR_ADDR_MASK) |
3022 		for (j = 0; j < 1000; j++) {
3023 			val = tr32(GRC_EEPROM_ADDR);
3025 			if (val & EEPROM_ADDR_COMPLETE)
3029 		if (!(val & EEPROM_ADDR_COMPLETE)) {
3038 /* offset and length are dword aligned */
/* Write to unbuffered flash that requires a read-modify-erase-write cycle
 * per page: read the whole page into a kmalloc'd bounce buffer, merge the
 * caller's data, issue WREN + page ERASE, then WREN again and stream the
 * page back word by word (FIRST on the first word, LAST on the final one).
 * Finishes with a WRDI (write-disable) command.
 */
3039 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3043 	u32 pagesize = tp->nvram_pagesize;
3044 	u32 pagemask = pagesize - 1;
3048 	tmp = kmalloc(pagesize, GFP_KERNEL);
3054 		u32 phy_addr, page_off, size;
3056 		phy_addr = offset & ~pagemask;
		/* Read the entire target page into the bounce buffer. */
3058 		for (j = 0; j < pagesize; j += 4) {
3059 			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3060 						  (__be32 *) (tmp + j));
3067 		page_off = offset & pagemask;
3074 		memcpy(tmp + page_off, buf, size);
3076 		offset = offset + (pagesize - page_off);
3078 		tg3_enable_nvram_access(tp);
3081 		 * Before we can erase the flash page, we need
3082 		 * to issue a special "write enable" command.
3084 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3086 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3089 		/* Erase the target page */
3090 		tw32(NVRAM_ADDR, phy_addr);
3092 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3093 			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3095 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3098 		/* Issue another write enable to start the write. */
3099 		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3101 		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3104 		for (j = 0; j < pagesize; j += 4) {
3107 			data = *((__be32 *) (tmp + j));
3109 			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3111 			tw32(NVRAM_ADDR, phy_addr + j);
3113 			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3117 				nvram_cmd |= NVRAM_CMD_FIRST;
3118 			else if (j == (pagesize - 4))
3119 				nvram_cmd |= NVRAM_CMD_LAST;
3121 			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
	/* Leave the flash write-disabled regardless of outcome. */
3129 	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3130 	tg3_nvram_exec_cmd(tp, nvram_cmd);
3137 /* offset and length are dword aligned */
/* Write to buffered flash / EEPROM-style NVRAM: no explicit erase needed.
 * Streams one 32-bit word per iteration, tagging page boundaries with
 * NVRAM_CMD_FIRST/NVRAM_CMD_LAST, translating each offset through
 * tg3_nvram_phys_addr(), and issuing a WREN before the first word of a
 * page on ST-JEDEC parts that require it.
 */
3138 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3143 	for (i = 0; i < len; i += 4, offset += 4) {
3144 		u32 page_off, phy_addr, nvram_cmd;
3147 		memcpy(&data, buf + i, 4);
3148 		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3150 		page_off = offset % tp->nvram_pagesize;
3152 		phy_addr = tg3_nvram_phys_addr(tp, offset);
3154 		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3156 		if (page_off == 0 || i == 0)
3157 			nvram_cmd |= NVRAM_CMD_FIRST;
3158 		if (page_off == (tp->nvram_pagesize - 4))
3159 			nvram_cmd |= NVRAM_CMD_LAST;
		/* Final word of the request also terminates the burst. */
3162 			nvram_cmd |= NVRAM_CMD_LAST;
3164 		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3165 		    !tg3_flag(tp, FLASH) ||
3166 		    !tg3_flag(tp, 57765_PLUS))
3167 			tw32(NVRAM_ADDR, phy_addr);
3169 		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3170 		    !tg3_flag(tp, 5755_PLUS) &&
3171 		    (tp->nvram_jedecnum == JEDEC_ST) &&
3172 		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3175 			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3176 			ret = tg3_nvram_exec_cmd(tp, cmd);
3180 		if (!tg3_flag(tp, FLASH)) {
3181 			/* We always do complete word writes to eeprom. */
3182 			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3185 		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3192 /* offset and length are dword aligned */
/* Top-level NVRAM write entry point.  Temporarily drops the GPIO-based
 * EEPROM write-protect if present, routes to the SEEPROM path for
 * non-NVRAM devices, otherwise takes the NVRAM lock, enables write mode
 * (GRC_MODE_NVRAM_WR_ENABLE) and dispatches to the buffered or unbuffered
 * writer.  Restores write-protect and GRC mode on the way out.
 */
3193 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3197 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
		/* Drop GPIO1 to disengage the external write-protect. */
3198 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3199 		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3203 	if (!tg3_flag(tp, NVRAM)) {
3204 		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3208 		ret = tg3_nvram_lock(tp);
3212 		tg3_enable_nvram_access(tp);
3213 		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3214 			tw32(NVRAM_WRITE1, 0x406);
3216 		grc_mode = tr32(GRC_MODE);
3217 		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3219 		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3220 			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3223 			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3227 		grc_mode = tr32(GRC_MODE);
3228 		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3230 		tg3_disable_nvram_access(tp);
3231 		tg3_nvram_unlock(tp);
3234 	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3235 		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3242 #define RX_CPU_SCRATCH_BASE 0x30000
3243 #define RX_CPU_SCRATCH_SIZE 0x04000
3244 #define TX_CPU_SCRATCH_BASE 0x34000
3245 #define TX_CPU_SCRATCH_SIZE 0x04000
3247 /* tp->lock is held. */
/* Halt the on-chip RX or TX CPU identified by register base @offset.
 * 5906 uses the VCPU extended-control register instead.  Both halt loops
 * repeatedly write CPU_MODE_HALT and poll for it to stick (up to 10000
 * iterations); on timeout an error is logged and returned.  Also clears
 * firmware's NVRAM arbitration request so the driver can use NVRAM.
 * BUG_ON guards against halting a TX CPU that 5705+ chips do not have.
 */
3248 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3252 	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3254 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3255 		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3257 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3260 	if (offset == RX_CPU_BASE) {
3261 		for (i = 0; i < 10000; i++) {
3262 			tw32(offset + CPU_STATE, 0xffffffff);
3263 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3264 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
		/* Final forced halt after the polling loop. */
3268 		tw32(offset + CPU_STATE, 0xffffffff);
3269 		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3272 		for (i = 0; i < 10000; i++) {
3273 			tw32(offset + CPU_STATE, 0xffffffff);
3274 			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3275 			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3281 		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3282 			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3286 	/* Clear firmware's nvram arbitration. */
3287 	if (tg3_flag(tp, NVRAM))
3288 		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
/* Fields of struct fw_info (opening line dropped by extraction):
 * describes one firmware blob to be loaded into an on-chip CPU. */
3293 	unsigned int fw_base;	/* load address inside CPU scratch memory */
3294 	unsigned int fw_len;	/* total length: end of bss - start of text */
3295 	const __be32 *fw_data;	/* blob contents, big-endian words */
3298 /* tp->lock is held. */
/* Load @info's firmware image into the scratch memory of the CPU at
 * @cpu_base.  Takes the NVRAM lock first because bootcode may still be
 * loading, halts the CPU, zeroes the scratch area, then copies the image
 * word by word with the appropriate write primitive (direct memory write
 * on 5705+, indirect register write otherwise).  Refuses to target the
 * TX CPU on 5705-class chips, which have none.
 */
3299 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3300 				 u32 cpu_scratch_base, int cpu_scratch_size,
3301 				 struct fw_info *info)
3303 	int err, lock_err, i;
3304 	void (*write_op)(struct tg3 *, u32, u32);
3306 	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3308 			"%s: Trying to load TX cpu firmware which is 5705\n",
3313 	if (tg3_flag(tp, 5705_PLUS))
3314 		write_op = tg3_write_mem;
3316 		write_op = tg3_write_indirect_reg32;
3318 	/* It is possible that bootcode is still loading at this point.
3319 	 * Get the nvram lock first before halting the cpu.
3321 	lock_err = tg3_nvram_lock(tp);
3322 	err = tg3_halt_cpu(tp, cpu_base);
3324 		tg3_nvram_unlock(tp);
	/* Zero the scratch area, keep the CPU halted, then copy the image. */
3328 	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3329 		write_op(tp, cpu_scratch_base + i, 0);
3330 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3331 	tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3332 	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3333 		write_op(tp, (cpu_scratch_base +
3334 			      (info->fw_base & 0xffff) +
3336 		      be32_to_cpu(info->fw_data[i]));
3344 /* tp->lock is held. */
/* Load the 5701 A0 workaround firmware (from tp->fw, already fetched via
 * request_firmware()) into both the RX and TX CPUs, then start only the
 * RX CPU: set its PC to fw_base and verify (up to 5 retries) that the PC
 * actually latched before releasing it from halt. */
3345 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3347 	struct fw_info info;
3348 	const __be32 *fw_data;
3351 	fw_data = (void *)tp->fw->data;
3353 	/* Firmware blob starts with version numbers, followed by
3354 	   start address and length. We are setting complete length.
3355 	   length = end_address_of_bss - start_address_of_text.
3356 	   Remainder is the blob to be loaded contiguously
3357 	   from start address. */
3359 	info.fw_base = be32_to_cpu(fw_data[1]);
3360 	info.fw_len = tp->fw->size - 12;
3361 	info.fw_data = &fw_data[3];
3363 	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3364 				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3369 	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3370 				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3375 	/* Now startup only the RX cpu. */
3376 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3377 	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3379 	for (i = 0; i < 5; i++) {
3380 		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
		/* PC did not latch: re-halt and retry the PC write. */
3382 		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3383 		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3384 		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3388 		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3389 			   "should be %08x\n", __func__,
3390 			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
	/* Release the RX CPU from halt. */
3393 	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3394 	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3399 /* tp->lock is held. */
/* Load software-TSO firmware from tp->fw.  Skipped entirely when the chip
 * supports hardware TSO (HW_TSO_1/2/3).  On 5705 the image goes into the
 * RX CPU using the MBUF pool area as scratch; otherwise into the TX CPU
 * scratch space.  Start sequence mirrors tg3_load_5701_a0_firmware_fix():
 * set PC, verify it latched (5 tries), then clear CPU_MODE to run.
 */
3400 static int tg3_load_tso_firmware(struct tg3 *tp)
3402 	struct fw_info info;
3403 	const __be32 *fw_data;
3404 	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3407 	if (tg3_flag(tp, HW_TSO_1) ||
3408 	    tg3_flag(tp, HW_TSO_2) ||
3409 	    tg3_flag(tp, HW_TSO_3))
3412 	fw_data = (void *)tp->fw->data;
3414 	/* Firmware blob starts with version numbers, followed by
3415 	   start address and length. We are setting complete length.
3416 	   length = end_address_of_bss - start_address_of_text.
3417 	   Remainder is the blob to be loaded contiguously
3418 	   from start address. */
3420 	info.fw_base = be32_to_cpu(fw_data[1]);
3421 	cpu_scratch_size = tp->fw_len;
3422 	info.fw_len = tp->fw->size - 12;
3423 	info.fw_data = &fw_data[3];
3425 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3426 		cpu_base = RX_CPU_BASE;
3427 		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3429 		cpu_base = TX_CPU_BASE;
3430 		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3431 		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3434 	err = tg3_load_firmware_cpu(tp, cpu_base,
3435 				    cpu_scratch_base, cpu_scratch_size,
3440 	/* Now startup the cpu. */
3441 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3442 	tw32_f(cpu_base + CPU_PC, info.fw_base);
3444 	for (i = 0; i < 5; i++) {
3445 		if (tr32(cpu_base + CPU_PC) == info.fw_base)
		/* PC did not latch: re-halt and retry the PC write. */
3447 		tw32(cpu_base + CPU_STATE, 0xffffffff);
3448 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3449 		tw32_f(cpu_base + CPU_PC, info.fw_base);
3454 			   "%s fails to set CPU PC, is %08x should be %08x\n",
3455 			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
	/* Release the CPU from halt. */
3458 	tw32(cpu_base + CPU_STATE, 0xffffffff);
3459 	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3464 /* tp->lock is held. */
/* Program the device's MAC address from tp->dev->dev_addr into the four
 * MAC_ADDR register pairs (optionally skipping slot 1, used by firmware
 * on some configurations), into the twelve extended address slots on
 * 5703/5704, and derive the TX backoff seed from the byte sum. */
3465 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3467 	u32 addr_high, addr_low;
3470 	addr_high = ((tp->dev->dev_addr[0] << 8) |
3471 		     tp->dev->dev_addr[1]);
3472 	addr_low = ((tp->dev->dev_addr[2] << 24) |
3473 		    (tp->dev->dev_addr[3] << 16) |
3474 		    (tp->dev->dev_addr[4] <<  8) |
3475 		    (tp->dev->dev_addr[5] <<  0));
3476 	for (i = 0; i < 4; i++) {
3477 		if (i == 1 && skip_mac_1)
3479 		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3480 		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3483 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3484 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3485 		for (i = 0; i < 12; i++) {
3486 			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3487 			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
	/* Seed the TX backoff RNG from the MAC address byte sum. */
3491 	addr_high = (tp->dev->dev_addr[0] +
3492 		     tp->dev->dev_addr[1] +
3493 		     tp->dev->dev_addr[2] +
3494 		     tp->dev->dev_addr[3] +
3495 		     tp->dev->dev_addr[4] +
3496 		     tp->dev->dev_addr[5]) &
3497 		TX_BACKOFF_SEED_MASK;
3498 	tw32(MAC_TX_BACKOFF_SEED, addr_high);
/* Rewrite TG3PCI_MISC_HOST_CTRL from the cached tp->misc_host_ctrl so
 * that register accesses (indirect or otherwise) work after a power
 * transition may have reset PCI config space. */
3501 static void tg3_enable_register_access(struct tg3 *tp)
3504 	 * Make sure register accesses (indirect or otherwise) will function
3507 	pci_write_config_dword(tp->pdev,
3508 			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
/* Bring the device to full power: restore register access, transition the
 * PCI device to D0, and switch the power source back to Vmain.  Logs and
 * returns an error if the D0 transition fails. */
3511 static int tg3_power_up(struct tg3 *tp)
3515 	tg3_enable_register_access(tp);
3517 	err = pci_set_power_state(tp->pdev, PCI_D0);
3519 		/* Switch out of Vaux if it is a NIC */
3520 		tg3_pwrsrc_switch_to_vmain(tp);
3522 		netdev_err(tp->dev, "Transition to D0 failed\n");
3528 static int tg3_setup_phy(struct tg3 *, int);
/* Prepare the chip for a low-power state (suspend / power-off).
 * Sequence: restore register access and the CLKREQ workaround; mask PCI
 * interrupts; record link parameters and (via phylib or legacy paths)
 * restrict the PHY to WoL-capable speeds; notify firmware of the pending
 * WoL state; configure the MAC for magic-packet reception when wake-up is
 * wanted; gate RX/TX clocks chip-dependently; power down the PHY when
 * nothing needs it; arbitrate Vaux/Vmain; and apply the 5750 A/B PLL
 * workaround.  Returns 0 (return line dropped by extraction).
 */
3530 static int tg3_power_down_prepare(struct tg3 *tp)
3533 	bool device_should_wake, do_low_power;
3535 	tg3_enable_register_access(tp);
3537 	/* Restore the CLKREQ setting. */
3538 	if (tg3_flag(tp, CLKREQ_BUG)) {
3541 		pci_read_config_word(tp->pdev,
3542 				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3544 		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3545 		pci_write_config_word(tp->pdev,
3546 				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
	/* Mask PCI interrupts while we reconfigure for low power. */
3550 	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3551 	tw32(TG3PCI_MISC_HOST_CTRL,
3552 	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3554 	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3555 			     tg3_flag(tp, WOL_ENABLE);
3557 	if (tg3_flag(tp, USE_PHYLIB)) {
3558 		do_low_power = false;
3559 		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3560 		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3561 			struct phy_device *phydev;
3562 			u32 phyid, advertising;
3564 			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3566 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			/* Save current link config so resume can restore it. */
3568 			tp->link_config.speed = phydev->speed;
3569 			tp->link_config.duplex = phydev->duplex;
3570 			tp->link_config.autoneg = phydev->autoneg;
3571 			tp->link_config.advertising = phydev->advertising;
3573 			advertising = ADVERTISED_TP |
3575 				      ADVERTISED_Autoneg |
3576 				      ADVERTISED_10baseT_Half;
3578 			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3580 				if (tg3_flag(tp, WOL_SPEED_100MB))
3581 					    ADVERTISED_100baseT_Half |
3582 					    ADVERTISED_100baseT_Full |
3583 					    ADVERTISED_10baseT_Full;
3585 					advertising |= ADVERTISED_10baseT_Full;
3588 			phydev->advertising = advertising;
3590 			phy_start_aneg(phydev);
3592 			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3593 			if (phyid != PHY_ID_BCMAC131) {
3594 				phyid &= PHY_BCM_OUI_MASK;
3595 				if (phyid == PHY_BCM_OUI_1 ||
3596 				    phyid == PHY_BCM_OUI_2 ||
3597 				    phyid == PHY_BCM_OUI_3)
3598 					do_low_power = true;
		/* Legacy (non-phylib) PHY management path. */
3602 		do_low_power = true;
3604 		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3605 			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3607 		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3608 			tg3_setup_phy(tp, 0);
3611 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3614 		val = tr32(GRC_VCPU_EXT_CTRL);
3615 		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3616 	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		/* Wait for firmware to acknowledge (magic mailbox value). */
3620 		for (i = 0; i < 200; i++) {
3621 			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3622 			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3627 	if (tg3_flag(tp, WOL_CAP))
3628 		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3629 						     WOL_DRV_STATE_SHUTDOWN |
3633 	if (device_should_wake) {
3636 		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3638 			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3639 				tg3_phy_auxctl_write(tp,
3640 					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3641 					       MII_TG3_AUXCTL_PCTL_WOL_EN |
3642 					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3643 					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3647 			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3648 				mac_mode = MAC_MODE_PORT_MODE_GMII;
3650 				mac_mode = MAC_MODE_PORT_MODE_MII;
3652 			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3653 			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3655 				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3656 					     SPEED_100 : SPEED_10;
3657 				if (tg3_5700_link_polarity(tp, speed))
3658 					mac_mode |= MAC_MODE_LINK_POLARITY;
3660 					mac_mode &= ~MAC_MODE_LINK_POLARITY;
3663 			mac_mode = MAC_MODE_PORT_MODE_TBI;
3666 		if (!tg3_flag(tp, 5750_PLUS))
3667 			tw32(MAC_LED_CTRL, tp->led_ctrl);
3669 		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3670 		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3671 		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3672 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3674 		if (tg3_flag(tp, ENABLE_APE))
3675 			mac_mode |= MAC_MODE_APE_TX_EN |
3676 				    MAC_MODE_APE_RX_EN |
3677 				    MAC_MODE_TDE_ENABLE;
3679 		tw32_f(MAC_MODE, mac_mode);
3682 		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
	/* Chip-dependent clock gating for the low-power state. */
3686 	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3687 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3688 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3691 		base_val = tp->pci_clock_ctrl;
3692 		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3693 			     CLOCK_CTRL_TXCLK_DISABLE);
3695 		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3696 			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
3697 	} else if (tg3_flag(tp, 5780_CLASS) ||
3698 		   tg3_flag(tp, CPMU_PRESENT) ||
3699 		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3701 	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3702 		u32 newbits1, newbits2;
3704 		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3705 		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3706 			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3707 				    CLOCK_CTRL_TXCLK_DISABLE |
3709 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3710 		} else if (tg3_flag(tp, 5705_PLUS)) {
3711 			newbits1 = CLOCK_CTRL_625_CORE;
3712 			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3714 			newbits1 = CLOCK_CTRL_ALTCLK;
3715 			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3718 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3721 		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3724 		if (!tg3_flag(tp, 5705_PLUS)) {
3727 			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3728 			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3729 				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3730 					    CLOCK_CTRL_TXCLK_DISABLE |
3731 					    CLOCK_CTRL_44MHZ_CORE);
3733 				newbits3 = CLOCK_CTRL_44MHZ_CORE;
3736 			tw32_wait_f(TG3PCI_CLOCK_CTRL,
3737 				    tp->pci_clock_ctrl | newbits3, 40);
3741 	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3742 		tg3_power_down_phy(tp, do_low_power);
3744 	tg3_frob_aux_power(tp, true);
3746 	/* Workaround for unstable PLL clock */
3747 	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3748 	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3749 		u32 val = tr32(0x7d00);
3751 		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3753 		if (!tg3_flag(tp, ENABLE_ASF)) {
3756 			err = tg3_nvram_lock(tp);
3757 			tg3_halt_cpu(tp, RX_CPU_BASE);
3759 				tg3_nvram_unlock(tp);
3763 	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
/* Full power-down: run tg3_power_down_prepare(), arm PCI wake from D3
 * according to the WOL_ENABLE flag, and enter D3hot. */
3768 static void tg3_power_down(struct tg3 *tp)
3770 	tg3_power_down_prepare(tp);
3772 	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3773 	pci_set_power_state(tp->pdev, PCI_D3hot);
/* Decode the PHY auxiliary-status register value @val into *speed and
 * *duplex.  FET-class PHYs use a different bit layout, handled in the
 * default case; anything undecodable yields SPEED_UNKNOWN /
 * DUPLEX_UNKNOWN.  (Speed-assignment lines for the 10/100 cases were
 * dropped by the extraction; code kept byte-identical.) */
3776 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3778 	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3779 	case MII_TG3_AUX_STAT_10HALF:
3781 		*duplex = DUPLEX_HALF;
3784 	case MII_TG3_AUX_STAT_10FULL:
3786 		*duplex = DUPLEX_FULL;
3789 	case MII_TG3_AUX_STAT_100HALF:
3791 		*duplex = DUPLEX_HALF;
3794 	case MII_TG3_AUX_STAT_100FULL:
3796 		*duplex = DUPLEX_FULL;
3799 	case MII_TG3_AUX_STAT_1000HALF:
3800 		*speed = SPEED_1000;
3801 		*duplex = DUPLEX_HALF;
3804 	case MII_TG3_AUX_STAT_1000FULL:
3805 		*speed = SPEED_1000;
3806 		*duplex = DUPLEX_FULL;
3810 		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3811 			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3813 			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3817 		*speed = SPEED_UNKNOWN;
3818 		*duplex = DUPLEX_UNKNOWN;
/* Program the copper PHY's autonegotiation advertisement.
 * @advertise: ethtool ADVERTISED_* mask; @flowctrl: FLOW_CTRL_* bits.
 * Writes MII_ADVERTISE, then MII_CTRL1000 on gigabit-capable PHYs (with
 * the 5701 A0/B0 master-mode erratum workaround), then configures EEE
 * advertisement via clause-45 MDIO_AN_EEE_ADV and applies chip-specific
 * DSP fixups under SMDSP enable/disable bracketing.
 */
3823 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3828 	new_adv = ADVERTISE_CSMA;
3829 	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3830 	new_adv |= mii_advertise_flowctrl(flowctrl);
3832 	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3836 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3837 		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
		/* 5701 A0/B0 erratum: force master mode. */
3839 		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3840 		    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3841 			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3843 		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3848 	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3851 	tw32(TG3_CPMU_EEE_MODE,
3852 	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3854 	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3859 		/* Advertise 100-BaseTX EEE ability */
3860 		if (advertise & ADVERTISED_100baseT_Full)
3861 			val |= MDIO_AN_EEE_ADV_100TX;
3862 		/* Advertise 1000-BaseT EEE ability */
3863 		if (advertise & ADVERTISED_1000baseT_Full)
3864 			val |= MDIO_AN_EEE_ADV_1000T;
3865 		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3869 		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3871 		case ASIC_REV_57765:
3872 		case ASIC_REV_57766:
3874 			/* If we advertised any eee advertisements above... */
3876 				val = MII_TG3_DSP_TAP26_ALNOKO |
3877 				      MII_TG3_DSP_TAP26_RMRXSTO |
3878 				      MII_TG3_DSP_TAP26_OPCSINPT;
3879 			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3882 			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3883 				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3884 						 MII_TG3_DSP_CH34TP2_HIBW01);
3887 		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
/* Start copper-PHY link bring-up.
 * Autoneg (or low-power) path: choose the advertisement mask — restricted
 * to 10 (and optionally 100) Mb/s in low-power mode, full config mask
 * otherwise — program it via tg3_phy_autoneg_cfg(), and restart autoneg.
 * Forced-speed path: build the BMCR word for the requested speed/duplex
 * and, if it differs from the current BMCR, bounce the PHY through
 * loopback until link drops before applying it.
 */
3896 static void tg3_phy_copper_begin(struct tg3 *tp)
3898 	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3899 	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3902 		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3903 			adv = ADVERTISED_10baseT_Half |
3904 			      ADVERTISED_10baseT_Full;
3905 			if (tg3_flag(tp, WOL_SPEED_100MB))
3906 				adv |= ADVERTISED_100baseT_Half |
3907 				       ADVERTISED_100baseT_Full;
3909 			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3911 			adv = tp->link_config.advertising;
3912 			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3913 				adv &= ~(ADVERTISED_1000baseT_Half |
3914 					 ADVERTISED_1000baseT_Full);
3916 			fc = tp->link_config.flowctrl;
3919 		tg3_phy_autoneg_cfg(tp, adv, fc);
3921 		tg3_writephy(tp, MII_BMCR,
3922 			     BMCR_ANENABLE | BMCR_ANRESTART);
3925 		u32 bmcr, orig_bmcr;
3927 		tp->link_config.active_speed = tp->link_config.speed;
3928 		tp->link_config.active_duplex = tp->link_config.duplex;
3931 		switch (tp->link_config.speed) {
3937 			bmcr |= BMCR_SPEED100;
3941 			bmcr |= BMCR_SPEED1000;
3945 		if (tp->link_config.duplex == DUPLEX_FULL)
3946 			bmcr |= BMCR_FULLDPLX;
3948 		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3949 		    (bmcr != orig_bmcr)) {
			/* Force link down via loopback before reprogramming. */
3950 			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3951 			for (i = 0; i < 1500; i++) {
3955 				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3956 				    tg3_readphy(tp, MII_BMSR, &tmp))
3958 				if (!(tmp & BMSR_LSTATUS)) {
3963 			tg3_writephy(tp, MII_BMCR, bmcr);
/* BCM5401 PHY DSP initialization: disable tap power management, set the
 * extended-packet-length bit, and write a fixed sequence of vendor DSP
 * coefficient registers.  Errors from the individual writes are OR-ed
 * together (this loses distinct error codes but flags any failure). */
3969 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3973 	/* Turn off tap power management. */
3974 	/* Set Extended packet length bit */
3975 	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3977 	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3978 	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3979 	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3980 	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3981 	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
/* Check whether the PHY's programmed advertisement registers still match
 * the driver's desired link configuration.  Compares MII_ADVERTISE
 * (storing the value read in *lcladv for the caller) and, on gigabit
 * PHYs, MII_CTRL1000 — accounting for the 5701 A0/B0 forced-master
 * erratum bits.  Returns false on any mismatch or read failure. */
3988 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3990 	u32 advmsk, tgtadv, advertising;
3992 	advertising = tp->link_config.advertising;
3993 	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
3995 	advmsk = ADVERTISE_ALL;
3996 	if (tp->link_config.active_duplex == DUPLEX_FULL) {
3997 		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
3998 		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4001 	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4004 	if ((*lcladv & advmsk) != tgtadv)
4007 	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4010 		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4012 		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4016 		    (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4017 		     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4018 			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4019 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4020 				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4022 			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4025 		if (tg3_ctrl != tgtadv)
/* Fetch the link partner's advertised abilities: MII_STAT1000 (unless
 * the PHY is 10/100-only) plus MII_LPA, converted to ethtool link-mode
 * bits and stored in tp->link_config.rmt_adv.  *rmtadv receives the
 * raw MII_LPA value.  Returns false on a PHY read failure (the failure
 * returns are on lines elided from this excerpt).
 */
4032 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4036 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4039 if (tg3_readphy(tp, MII_STAT1000, &val))
4042 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4045 if (tg3_readphy(tp, MII_LPA, rmtadv))
4048 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4049 tp->link_config.rmt_adv = lpeth;
/* Bring up (or re-validate) the link on a copper PHY.  Clears stale MAC
 * status, applies per-chip PHY workarounds, determines link/speed/duplex
 * from the PHY aux status, resolves flow control, programs the MAC port
 * mode to match, and reports carrier transitions to the stack.
 */
4054 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4056 int current_link_up;
4058 u32 lcl_adv, rmt_adv;
4066 (MAC_STATUS_SYNC_CHANGED |
4067 MAC_STATUS_CFG_CHANGED |
4068 MAC_STATUS_MI_COMPLETION |
4069 MAC_STATUS_LNKSTATE_CHANGED));
/* MI auto-polling must be off while we do direct MDIO accesses. */
4072 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4074 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4078 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4080 /* Some third-party PHYs need to be reset on link going
4083 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4084 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4085 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4086 netif_carrier_ok(tp->dev)) {
/* BMSR is latched; read twice to get the current link state. */
4087 tg3_readphy(tp, MII_BMSR, &bmsr);
4088 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4089 !(bmsr & BMSR_LSTATUS))
/* BCM5401-specific link recovery: re-run the DSP init and, for
 * rev B0 stuck at gigabit, reset the PHY outright.
 */
4095 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4096 tg3_readphy(tp, MII_BMSR, &bmsr);
4097 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4098 !tg3_flag(tp, INIT_COMPLETE))
4101 if (!(bmsr & BMSR_LSTATUS)) {
4102 err = tg3_init_5401phy_dsp(tp);
4106 tg3_readphy(tp, MII_BMSR, &bmsr);
4107 for (i = 0; i < 1000; i++) {
4109 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4110 (bmsr & BMSR_LSTATUS)) {
4116 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4117 TG3_PHY_REV_BCM5401_B0 &&
4118 !(bmsr & BMSR_LSTATUS) &&
4119 tp->link_config.active_speed == SPEED_1000) {
4120 err = tg3_phy_reset(tp);
4122 err = tg3_init_5401phy_dsp(tp);
4127 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4128 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4129 /* 5701 {A0,B0} CRC bug workaround */
4130 tg3_writephy(tp, 0x15, 0x0a75);
4131 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4132 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4133 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4136 /* Clear pending interrupts... */
4137 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4138 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4140 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4141 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG)
4142 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4143 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4145 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4146 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4147 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4148 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4149 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4151 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
/* Assume no link until the checks below prove otherwise. */
4154 current_link_up = 0;
4155 current_speed = SPEED_UNKNOWN;
4156 current_duplex = DUPLEX_UNKNOWN;
4157 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4158 tp->link_config.rmt_adv = 0;
4160 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4161 err = tg3_phy_auxctl_read(tp,
4162 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4164 if (!err && !(val & (1 << 10))) {
4165 tg3_phy_auxctl_write(tp,
4166 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4173 for (i = 0; i < 100; i++) {
4174 tg3_readphy(tp, MII_BMSR, &bmsr);
4175 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4176 (bmsr & BMSR_LSTATUS))
4181 if (bmsr & BMSR_LSTATUS) {
/* Link is up: poll aux status until speed/duplex are valid. */
4184 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4185 for (i = 0; i < 2000; i++) {
4187 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4192 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4197 for (i = 0; i < 200; i++) {
4198 tg3_readphy(tp, MII_BMCR, &bmcr);
4199 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4201 if (bmcr && bmcr != 0x7fff)
4209 tp->link_config.active_speed = current_speed;
4210 tp->link_config.active_duplex = current_duplex;
/* With autoneg, accept the link only if our advertisement still
 * matches link_config and the partner abilities could be fetched;
 * without autoneg, the forced settings must match what came up.
 */
4212 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4213 if ((bmcr & BMCR_ANENABLE) &&
4214 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4215 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4216 current_link_up = 1;
4218 if (!(bmcr & BMCR_ANENABLE) &&
4219 tp->link_config.speed == current_speed &&
4220 tp->link_config.duplex == current_duplex &&
4221 tp->link_config.flowctrl ==
4222 tp->link_config.active_flowctrl) {
4223 current_link_up = 1;
4227 if (current_link_up == 1 &&
4228 tp->link_config.active_duplex == DUPLEX_FULL) {
4231 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4232 reg = MII_TG3_FET_GEN_STAT;
4233 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4235 reg = MII_TG3_EXT_STAT;
4236 bit = MII_TG3_EXT_STAT_MDIX;
4239 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4240 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4242 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4247 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4248 tg3_phy_copper_begin(tp);
4250 tg3_readphy(tp, MII_BMSR, &bmsr);
4251 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4252 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4253 current_link_up = 1;
/* Program the MAC port mode and duplex to match the resolved link. */
4256 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4257 if (current_link_up == 1) {
4258 if (tp->link_config.active_speed == SPEED_100 ||
4259 tp->link_config.active_speed == SPEED_10)
4260 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4262 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4263 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4264 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4266 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4268 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4269 if (tp->link_config.active_duplex == DUPLEX_HALF)
4270 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4272 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4273 if (current_link_up == 1 &&
4274 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4275 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4277 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4280 /* ??? Without this setting Netgear GA302T PHY does not
4281 * ??? send/receive packets...
4283 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4284 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4285 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4286 tw32_f(MAC_MI_MODE, tp->mi_mode);
4290 tw32_f(MAC_MODE, tp->mac_mode);
4293 tg3_phy_eee_adjust(tp, current_link_up);
4295 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4296 /* Polled via timer. */
4297 tw32_f(MAC_EVENT, 0);
4299 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 at gigabit on PCI-X: notify firmware via the mailbox. */
4303 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4304 current_link_up == 1 &&
4305 tp->link_config.active_speed == SPEED_1000 &&
4306 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4309 (MAC_STATUS_SYNC_CHANGED |
4310 MAC_STATUS_CFG_CHANGED));
4313 NIC_SRAM_FIRMWARE_MBOX,
4314 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4317 /* Prevent send BD corruption. */
4318 if (tg3_flag(tp, CLKREQ_BUG)) {
4319 u16 oldlnkctl, newlnkctl;
4321 pci_read_config_word(tp->pdev,
4322 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4324 if (tp->link_config.active_speed == SPEED_100 ||
4325 tp->link_config.active_speed == SPEED_10)
4326 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4328 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4329 if (newlnkctl != oldlnkctl)
4330 pci_write_config_word(tp->pdev,
4331 pci_pcie_cap(tp->pdev) +
4332 PCI_EXP_LNKCTL, newlnkctl);
/* Report carrier transitions to the networking stack. */
4335 if (current_link_up != netif_carrier_ok(tp->dev)) {
4336 if (current_link_up)
4337 netif_carrier_on(tp->dev);
4339 netif_carrier_off(tp->dev);
4340 tg3_link_report(tp);
/* State for the software 1000BASE-X autonegotiation state machine
 * (tg3_fiber_aneg_smachine).  'flags' uses the MR_* bits below;
 * txconfig/rxconfig hold the transmitted/received config code words
 * encoded with the ANEG_CFG_* bits.
 */
4346 struct tg3_fiber_aneginfo {
4348 #define ANEG_STATE_UNKNOWN 0
4349 #define ANEG_STATE_AN_ENABLE 1
4350 #define ANEG_STATE_RESTART_INIT 2
4351 #define ANEG_STATE_RESTART 3
4352 #define ANEG_STATE_DISABLE_LINK_OK 4
4353 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4354 #define ANEG_STATE_ABILITY_DETECT 6
4355 #define ANEG_STATE_ACK_DETECT_INIT 7
4356 #define ANEG_STATE_ACK_DETECT 8
4357 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4358 #define ANEG_STATE_COMPLETE_ACK 10
4359 #define ANEG_STATE_IDLE_DETECT_INIT 11
4360 #define ANEG_STATE_IDLE_DETECT 12
4361 #define ANEG_STATE_LINK_OK 13
4362 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4363 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4366 #define MR_AN_ENABLE 0x00000001
4367 #define MR_RESTART_AN 0x00000002
4368 #define MR_AN_COMPLETE 0x00000004
4369 #define MR_PAGE_RX 0x00000008
4370 #define MR_NP_LOADED 0x00000010
4371 #define MR_TOGGLE_TX 0x00000020
4372 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4373 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4374 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4375 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4376 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4377 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4378 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4379 #define MR_TOGGLE_RX 0x00002000
4380 #define MR_NP_RX 0x00004000
4382 #define MR_LINK_OK 0x80000000
4384 unsigned long link_time, cur_time;
4386 u32 ability_match_cfg;
4387 int ability_match_count;
4389 char ability_match, idle_match, ack_match;
/* Config code word bit layout (ANEG_CFG_*). */
4391 u32 txconfig, rxconfig;
4392 #define ANEG_CFG_NP 0x00000080
4393 #define ANEG_CFG_ACK 0x00000040
4394 #define ANEG_CFG_RF2 0x00000020
4395 #define ANEG_CFG_RF1 0x00000010
4396 #define ANEG_CFG_PS2 0x00000001
4397 #define ANEG_CFG_PS1 0x00008000
4398 #define ANEG_CFG_HD 0x00004000
4399 #define ANEG_CFG_FD 0x00002000
4400 #define ANEG_CFG_INVAL 0x00001f06
/* Return codes / settle time for the aneg state machine. */
4405 #define ANEG_TIMER_ENAB 2
4406 #define ANEG_FAILED -1
4408 #define ANEG_STATE_SETTLE_TIME 10000
/* One step of the software fiber autonegotiation state machine.
 * Exchanges config code words with the link partner through the
 * MAC_TX_AUTO_NEG / MAC_RX_AUTO_NEG registers and advances ap->state.
 * Returns ANEG_TIMER_ENAB while waiting, ANEG_FAILED on error, or the
 * terminal status (terminal-state returns are partly elided here).
 */
4410 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4411 struct tg3_fiber_aneginfo *ap)
4414 unsigned long delta;
4418 if (ap->state == ANEG_STATE_UNKNOWN) {
4422 ap->ability_match_cfg = 0;
4423 ap->ability_match_count = 0;
4424 ap->ability_match = 0;
/* Track incoming config words: an ability match requires the same
 * non-zero config word to be seen more than once.
 */
4430 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4431 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4433 if (rx_cfg_reg != ap->ability_match_cfg) {
4434 ap->ability_match_cfg = rx_cfg_reg;
4435 ap->ability_match = 0;
4436 ap->ability_match_count = 0;
4438 if (++ap->ability_match_count > 1) {
4439 ap->ability_match = 1;
4440 ap->ability_match_cfg = rx_cfg_reg;
4443 if (rx_cfg_reg & ANEG_CFG_ACK)
4451 ap->ability_match_cfg = 0;
4452 ap->ability_match_count = 0;
4453 ap->ability_match = 0;
4459 ap->rxconfig = rx_cfg_reg;
4462 switch (ap->state) {
4463 case ANEG_STATE_UNKNOWN:
4464 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4465 ap->state = ANEG_STATE_AN_ENABLE;
4468 case ANEG_STATE_AN_ENABLE:
4469 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4470 if (ap->flags & MR_AN_ENABLE) {
4473 ap->ability_match_cfg = 0;
4474 ap->ability_match_count = 0;
4475 ap->ability_match = 0;
4479 ap->state = ANEG_STATE_RESTART_INIT;
4481 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4485 case ANEG_STATE_RESTART_INIT:
4486 ap->link_time = ap->cur_time;
4487 ap->flags &= ~(MR_NP_LOADED);
4489 tw32(MAC_TX_AUTO_NEG, 0);
4490 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4491 tw32_f(MAC_MODE, tp->mac_mode);
4494 ret = ANEG_TIMER_ENAB;
4495 ap->state = ANEG_STATE_RESTART;
4498 case ANEG_STATE_RESTART:
4499 delta = ap->cur_time - ap->link_time;
4500 if (delta > ANEG_STATE_SETTLE_TIME)
4501 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4503 ret = ANEG_TIMER_ENAB;
4506 case ANEG_STATE_DISABLE_LINK_OK:
4510 case ANEG_STATE_ABILITY_DETECT_INIT:
/* Build our config word: full duplex plus pause capabilities. */
4511 ap->flags &= ~(MR_TOGGLE_TX);
4512 ap->txconfig = ANEG_CFG_FD;
4513 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4514 if (flowctrl & ADVERTISE_1000XPAUSE)
4515 ap->txconfig |= ANEG_CFG_PS1;
4516 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4517 ap->txconfig |= ANEG_CFG_PS2;
4518 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4519 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4520 tw32_f(MAC_MODE, tp->mac_mode);
4523 ap->state = ANEG_STATE_ABILITY_DETECT;
4526 case ANEG_STATE_ABILITY_DETECT:
4527 if (ap->ability_match != 0 && ap->rxconfig != 0)
4528 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4531 case ANEG_STATE_ACK_DETECT_INIT:
4532 ap->txconfig |= ANEG_CFG_ACK;
4533 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4534 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4535 tw32_f(MAC_MODE, tp->mac_mode);
4538 ap->state = ANEG_STATE_ACK_DETECT;
4541 case ANEG_STATE_ACK_DETECT:
4542 if (ap->ack_match != 0) {
4543 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4544 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4545 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4547 ap->state = ANEG_STATE_AN_ENABLE;
4549 } else if (ap->ability_match != 0 &&
4550 ap->rxconfig == 0) {
4551 ap->state = ANEG_STATE_AN_ENABLE;
4555 case ANEG_STATE_COMPLETE_ACK_INIT:
4556 if (ap->rxconfig & ANEG_CFG_INVAL) {
/* Decode the partner's config word into MR_LP_ADV_* flags. */
4560 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4561 MR_LP_ADV_HALF_DUPLEX |
4562 MR_LP_ADV_SYM_PAUSE |
4563 MR_LP_ADV_ASYM_PAUSE |
4564 MR_LP_ADV_REMOTE_FAULT1 |
4565 MR_LP_ADV_REMOTE_FAULT2 |
4566 MR_LP_ADV_NEXT_PAGE |
4569 if (ap->rxconfig & ANEG_CFG_FD)
4570 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4571 if (ap->rxconfig & ANEG_CFG_HD)
4572 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4573 if (ap->rxconfig & ANEG_CFG_PS1)
4574 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4575 if (ap->rxconfig & ANEG_CFG_PS2)
4576 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4577 if (ap->rxconfig & ANEG_CFG_RF1)
4578 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4579 if (ap->rxconfig & ANEG_CFG_RF2)
4580 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4581 if (ap->rxconfig & ANEG_CFG_NP)
4582 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4584 ap->link_time = ap->cur_time;
4586 ap->flags ^= (MR_TOGGLE_TX);
4587 if (ap->rxconfig & 0x0008)
4588 ap->flags |= MR_TOGGLE_RX;
4589 if (ap->rxconfig & ANEG_CFG_NP)
4590 ap->flags |= MR_NP_RX;
4591 ap->flags |= MR_PAGE_RX;
4593 ap->state = ANEG_STATE_COMPLETE_ACK;
4594 ret = ANEG_TIMER_ENAB;
4597 case ANEG_STATE_COMPLETE_ACK:
4598 if (ap->ability_match != 0 &&
4599 ap->rxconfig == 0) {
4600 ap->state = ANEG_STATE_AN_ENABLE;
4603 delta = ap->cur_time - ap->link_time;
4604 if (delta > ANEG_STATE_SETTLE_TIME) {
4605 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4606 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4608 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4609 !(ap->flags & MR_NP_RX)) {
4610 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4618 case ANEG_STATE_IDLE_DETECT_INIT:
4619 ap->link_time = ap->cur_time;
4620 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4621 tw32_f(MAC_MODE, tp->mac_mode);
4624 ap->state = ANEG_STATE_IDLE_DETECT;
4625 ret = ANEG_TIMER_ENAB;
4628 case ANEG_STATE_IDLE_DETECT:
4629 if (ap->ability_match != 0 &&
4630 ap->rxconfig == 0) {
4631 ap->state = ANEG_STATE_AN_ENABLE;
4634 delta = ap->cur_time - ap->link_time;
4635 if (delta > ANEG_STATE_SETTLE_TIME) {
4636 /* XXX another gem from the Broadcom driver :( */
4637 ap->state = ANEG_STATE_LINK_OK;
4641 case ANEG_STATE_LINK_OK:
4642 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4646 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4647 /* ??? unimplemented */
4650 case ANEG_STATE_NEXT_PAGE_WAIT:
4651 /* ??? unimplemented */
/* Run the software fiber autoneg state machine to completion, bounded
 * by ~195000 iterations.  On exit, *txflags holds our transmitted
 * config word and *rxflags the MR_* result flags.  Success requires
 * ANEG_DONE with AN complete, link OK and a full-duplex-capable
 * partner (the final return lines are elided from this excerpt).
 */
4662 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4665 struct tg3_fiber_aneginfo aninfo;
4666 int status = ANEG_FAILED;
4670 tw32_f(MAC_TX_AUTO_NEG, 0);
/* Force GMII port mode while autoneg runs. */
4672 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4673 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4676 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4679 memset(&aninfo, 0, sizeof(aninfo));
4680 aninfo.flags |= MR_AN_ENABLE;
4681 aninfo.state = ANEG_STATE_UNKNOWN;
4682 aninfo.cur_time = 0;
4684 while (++tick < 195000) {
4685 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4686 if (status == ANEG_DONE || status == ANEG_FAILED)
/* Stop sending config code words once autoneg has finished. */
4692 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4693 tw32_f(MAC_MODE, tp->mac_mode);
4696 *txflags = aninfo.txconfig;
4697 *rxflags = aninfo.flags;
4699 if (status == ANEG_DONE &&
4700 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4701 MR_LP_ADV_FULL_DUPLEX)))
/* Initialization sequence for the BCM8002 SerDes PHY: reset, PLL lock
 * range, channel/config register setup, POR pulse, and a settle wait.
 * The raw register numbers (0x10, 0x11, ...) are BCM8002-specific and
 * undocumented in this file.
 */
4707 static void tg3_init_bcm8002(struct tg3 *tp)
4709 u32 mac_status = tr32(MAC_STATUS);
4712 /* Reset when initting first time or we have a link. */
4713 if (tg3_flag(tp, INIT_COMPLETE) &&
4714 !(mac_status & MAC_STATUS_PCS_SYNCED))
4717 /* Set PLL lock range. */
4718 tg3_writephy(tp, 0x16, 0x8007);
4721 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4723 /* Wait for reset to complete. */
4724 /* XXX schedule_timeout() ... */
4725 for (i = 0; i < 500; i++)
4728 /* Config mode; select PMA/Ch 1 regs. */
4729 tg3_writephy(tp, 0x10, 0x8411);
4731 /* Enable auto-lock and comdet, select txclk for tx. */
4732 tg3_writephy(tp, 0x11, 0x0a10);
4734 tg3_writephy(tp, 0x18, 0x00a0);
4735 tg3_writephy(tp, 0x16, 0x41ff);
4737 /* Assert and deassert POR. */
4738 tg3_writephy(tp, 0x13, 0x0400);
4740 tg3_writephy(tp, 0x13, 0x0000);
4742 tg3_writephy(tp, 0x11, 0x0a50);
4744 tg3_writephy(tp, 0x11, 0x0a10);
4746 /* Wait for signal to stabilize */
4747 /* XXX schedule_timeout() ... */
4748 for (i = 0; i < 15000; i++)
4751 /* Deselect the channel register so we can read the PHYID
4754 tg3_writephy(tp, 0x10, 0x8011);
/* Drive the SG_DIG hardware autonegotiation block used by fiber ports.
 * Handles forced-mode teardown, (re)starting HW autoneg, decoding the
 * completed result (including pause resolution into flow control), and
 * a parallel-detection fallback when the partner sends no config words.
 * Returns 1 if the link came up, 0 otherwise.
 */
4757 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4760 u32 sg_dig_ctrl, sg_dig_status;
4761 u32 serdes_cfg, expected_sg_dig_ctrl;
4762 int workaround, port_a;
4763 int current_link_up;
4766 expected_sg_dig_ctrl = 0;
4769 current_link_up = 0;
/* 5704 A0/A1 need a serdes_cfg workaround; detect port A vs B. */
4771 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4772 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4774 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4777 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4778 /* preserve bits 20-23 for voltage regulator */
4779 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4782 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4784 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
/* Forced mode: tear down HW autoneg if it was active. */
4785 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4787 u32 val = serdes_cfg;
4793 tw32_f(MAC_SERDES_CFG, val);
4796 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4798 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4799 tg3_setup_flow_control(tp, 0, 0);
4800 current_link_up = 1;
4805 /* Want auto-negotiation. */
4806 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4808 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4809 if (flowctrl & ADVERTISE_1000XPAUSE)
4810 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4811 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4812 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4814 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4815 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4816 tp->serdes_counter &&
4817 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4818 MAC_STATUS_RCVD_CFG)) ==
4819 MAC_STATUS_PCS_SYNCED)) {
4820 tp->serdes_counter--;
4821 current_link_up = 1;
/* Restart HW autoneg with a soft reset of the SG_DIG block. */
4826 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4827 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4829 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4831 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4832 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4833 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4834 MAC_STATUS_SIGNAL_DET)) {
4835 sg_dig_status = tr32(SG_DIG_STATUS);
4836 mac_status = tr32(MAC_STATUS);
4838 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4839 (mac_status & MAC_STATUS_PCS_SYNCED)) {
/* Autoneg finished: resolve pause from both sides. */
4840 u32 local_adv = 0, remote_adv = 0;
4842 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4843 local_adv |= ADVERTISE_1000XPAUSE;
4844 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4845 local_adv |= ADVERTISE_1000XPSE_ASYM;
4847 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4848 remote_adv |= LPA_1000XPAUSE;
4849 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4850 remote_adv |= LPA_1000XPAUSE_ASYM;
4852 tp->link_config.rmt_adv =
4853 mii_adv_to_ethtool_adv_x(remote_adv);
4855 tg3_setup_flow_control(tp, local_adv, remote_adv);
4856 current_link_up = 1;
4857 tp->serdes_counter = 0;
4858 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4859 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4860 if (tp->serdes_counter)
4861 tp->serdes_counter--;
4864 u32 val = serdes_cfg;
4871 tw32_f(MAC_SERDES_CFG, val);
4874 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4877 /* Link parallel detection - link is up */
4878 /* only if we have PCS_SYNC and not */
4879 /* receiving config code words */
4880 mac_status = tr32(MAC_STATUS);
4881 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4882 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4883 tg3_setup_flow_control(tp, 0, 0);
4884 current_link_up = 1;
4886 TG3_PHYFLG_PARALLEL_DETECT;
4887 tp->serdes_counter =
4888 SERDES_PARALLEL_DET_TIMEOUT;
4890 goto restart_autoneg;
4894 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4895 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4899 return current_link_up;
/* Software link setup for fiber ports without HW autoneg support
 * (e.g. BCM8002): runs fiber_autoneg() and resolves flow control from
 * the exchanged config words, or forces 1000FD when autoneg is off.
 * Returns 1 if the link is up.
 */
4902 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4904 int current_link_up = 0;
4906 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4909 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4910 u32 txflags, rxflags;
4913 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4914 u32 local_adv = 0, remote_adv = 0;
/* Translate config-word pause bits into MII pause advert bits. */
4916 if (txflags & ANEG_CFG_PS1)
4917 local_adv |= ADVERTISE_1000XPAUSE;
4918 if (txflags & ANEG_CFG_PS2)
4919 local_adv |= ADVERTISE_1000XPSE_ASYM;
4921 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4922 remote_adv |= LPA_1000XPAUSE;
4923 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4924 remote_adv |= LPA_1000XPAUSE_ASYM;
4926 tp->link_config.rmt_adv =
4927 mii_adv_to_ethtool_adv_x(remote_adv);
4929 tg3_setup_flow_control(tp, local_adv, remote_adv);
4931 current_link_up = 1;
/* Wait for the sync/config-changed status bits to settle. */
4933 for (i = 0; i < 30; i++) {
4936 (MAC_STATUS_SYNC_CHANGED |
4937 MAC_STATUS_CFG_CHANGED));
4939 if ((tr32(MAC_STATUS) &
4940 (MAC_STATUS_SYNC_CHANGED |
4941 MAC_STATUS_CFG_CHANGED)) == 0)
4945 mac_status = tr32(MAC_STATUS);
4946 if (current_link_up == 0 &&
4947 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4948 !(mac_status & MAC_STATUS_RCVD_CFG))
4949 current_link_up = 1;
4951 tg3_setup_flow_control(tp, 0, 0);
4953 /* Forcing 1000FD link up. */
4954 current_link_up = 1;
4956 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4959 tw32_f(MAC_MODE, tp->mac_mode);
4964 return current_link_up;
/* Top-level link setup for TBI/fiber ports.  Puts the MAC in TBI port
 * mode, runs either the SG_DIG hardware autoneg path or the software
 * (by-hand) path, waits for MAC status to settle, programs the link
 * LEDs, and reports carrier/flow-control changes to the stack.
 */
4967 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4970 u16 orig_active_speed;
4971 u8 orig_active_duplex;
4973 int current_link_up;
/* Remember current link parameters so we can report changes even
 * when the carrier state itself does not flip.
 */
4976 orig_pause_cfg = tp->link_config.active_flowctrl;
4977 orig_active_speed = tp->link_config.active_speed;
4978 orig_active_duplex = tp->link_config.active_duplex;
4980 if (!tg3_flag(tp, HW_AUTONEG) &&
4981 netif_carrier_ok(tp->dev) &&
4982 tg3_flag(tp, INIT_COMPLETE)) {
4983 mac_status = tr32(MAC_STATUS);
4984 mac_status &= (MAC_STATUS_PCS_SYNCED |
4985 MAC_STATUS_SIGNAL_DET |
4986 MAC_STATUS_CFG_CHANGED |
4987 MAC_STATUS_RCVD_CFG);
4988 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4989 MAC_STATUS_SIGNAL_DET)) {
4990 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4991 MAC_STATUS_CFG_CHANGED));
4996 tw32_f(MAC_TX_AUTO_NEG, 0);
4998 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4999 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5000 tw32_f(MAC_MODE, tp->mac_mode);
5003 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5004 tg3_init_bcm8002(tp);
5006 /* Enable link change event even when serdes polling. */
5007 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5010 current_link_up = 0;
5011 tp->link_config.rmt_adv = 0;
5012 mac_status = tr32(MAC_STATUS);
5014 if (tg3_flag(tp, HW_AUTONEG))
5015 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5017 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
/* Clear the link-change bit in the status block ourselves. */
5019 tp->napi[0].hw_status->status =
5020 (SD_STATUS_UPDATED |
5021 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5023 for (i = 0; i < 100; i++) {
5024 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5025 MAC_STATUS_CFG_CHANGED));
5027 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5028 MAC_STATUS_CFG_CHANGED |
5029 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5033 mac_status = tr32(MAC_STATUS);
5034 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5035 current_link_up = 0;
5036 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5037 tp->serdes_counter == 0) {
5038 tw32_f(MAC_MODE, (tp->mac_mode |
5039 MAC_MODE_SEND_CONFIGS));
5041 tw32_f(MAC_MODE, tp->mac_mode);
/* Fiber links are always 1000/full when up; set LEDs to match. */
5045 if (current_link_up == 1) {
5046 tp->link_config.active_speed = SPEED_1000;
5047 tp->link_config.active_duplex = DUPLEX_FULL;
5048 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5049 LED_CTRL_LNKLED_OVERRIDE |
5050 LED_CTRL_1000MBPS_ON));
5052 tp->link_config.active_speed = SPEED_UNKNOWN;
5053 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5054 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5055 LED_CTRL_LNKLED_OVERRIDE |
5056 LED_CTRL_TRAFFIC_OVERRIDE));
5059 if (current_link_up != netif_carrier_ok(tp->dev)) {
5060 if (current_link_up)
5061 netif_carrier_on(tp->dev);
5063 netif_carrier_off(tp->dev);
5064 tg3_link_report(tp);
5066 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5067 if (orig_pause_cfg != now_pause_cfg ||
5068 orig_active_speed != tp->link_config.active_speed ||
5069 orig_active_duplex != tp->link_config.active_duplex)
5070 tg3_link_report(tp);
/* Link setup for SerDes devices reached through the MII register set
 * (e.g. 5714S-class parts).  Programs advertisement/BMCR for autoneg
 * or forced mode, reads back link state (with a 5714 TX_STATUS quirk),
 * resolves duplex and flow control, and updates carrier state.
 */
5076 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5078 int current_link_up, err = 0;
5082 u32 local_adv, remote_adv;
5084 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5085 tw32_f(MAC_MODE, tp->mac_mode);
5091 (MAC_STATUS_SYNC_CHANGED |
5092 MAC_STATUS_CFG_CHANGED |
5093 MAC_STATUS_MI_COMPLETION |
5094 MAC_STATUS_LNKSTATE_CHANGED));
5100 current_link_up = 0;
5101 current_speed = SPEED_UNKNOWN;
5102 current_duplex = DUPLEX_UNKNOWN;
5103 tp->link_config.rmt_adv = 0;
/* BMSR is latched; read twice.  On 5714 the MAC's TX status is the
 * authoritative link indication, overriding BMSR_LSTATUS.
 */
5105 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5106 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5107 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5108 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5109 bmsr |= BMSR_LSTATUS;
5111 bmsr &= ~BMSR_LSTATUS;
5114 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5116 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5117 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5118 /* do nothing, just check for link up at the end */
5119 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5122 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5123 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5124 ADVERTISE_1000XPAUSE |
5125 ADVERTISE_1000XPSE_ASYM |
5128 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5129 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
/* Restart autoneg only if the advertisement actually changed or
 * autoneg was not already enabled.
 */
5131 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5132 tg3_writephy(tp, MII_ADVERTISE, newadv);
5133 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5134 tg3_writephy(tp, MII_BMCR, bmcr);
5136 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5137 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5138 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5145 bmcr &= ~BMCR_SPEED1000;
5146 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5148 if (tp->link_config.duplex == DUPLEX_FULL)
5149 new_bmcr |= BMCR_FULLDPLX;
5151 if (new_bmcr != bmcr) {
5152 /* BMCR_SPEED1000 is a reserved bit that needs
5153 * to be set on write.
5155 new_bmcr |= BMCR_SPEED1000;
5157 /* Force a linkdown */
5158 if (netif_carrier_ok(tp->dev)) {
5161 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5162 adv &= ~(ADVERTISE_1000XFULL |
5163 ADVERTISE_1000XHALF |
5165 tg3_writephy(tp, MII_ADVERTISE, adv);
5166 tg3_writephy(tp, MII_BMCR, bmcr |
5170 netif_carrier_off(tp->dev);
5172 tg3_writephy(tp, MII_BMCR, new_bmcr);
5174 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5175 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5176 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5178 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5179 bmsr |= BMSR_LSTATUS;
5181 bmsr &= ~BMSR_LSTATUS;
5183 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5187 if (bmsr & BMSR_LSTATUS) {
5188 current_speed = SPEED_1000;
5189 current_link_up = 1;
5190 if (bmcr & BMCR_FULLDPLX)
5191 current_duplex = DUPLEX_FULL;
5193 current_duplex = DUPLEX_HALF;
5198 if (bmcr & BMCR_ANENABLE) {
/* Resolve duplex from the intersection of both adverts. */
5201 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5202 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5203 common = local_adv & remote_adv;
5204 if (common & (ADVERTISE_1000XHALF |
5205 ADVERTISE_1000XFULL)) {
5206 if (common & ADVERTISE_1000XFULL)
5207 current_duplex = DUPLEX_FULL;
5209 current_duplex = DUPLEX_HALF;
5211 tp->link_config.rmt_adv =
5212 mii_adv_to_ethtool_adv_x(remote_adv);
5213 } else if (!tg3_flag(tp, 5780_CLASS)) {
5214 /* Link is up via parallel detect */
5216 current_link_up = 0;
5221 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5222 tg3_setup_flow_control(tp, local_adv, remote_adv);
5224 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5225 if (tp->link_config.active_duplex == DUPLEX_HALF)
5226 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5228 tw32_f(MAC_MODE, tp->mac_mode);
5231 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5233 tp->link_config.active_speed = current_speed;
5234 tp->link_config.active_duplex = current_duplex;
5236 if (current_link_up != netif_carrier_ok(tp->dev)) {
5237 if (current_link_up)
5238 netif_carrier_on(tp->dev);
5240 netif_carrier_off(tp->dev);
5241 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5243 tg3_link_report(tp);
/* Periodic helper: when autoneg fails to complete, detect a forced-mode
 * link partner via parallel detection (signal detect present, no config
 * code words) and force 1000FD; conversely, once config code words are
 * seen again on a parallel-detected link, re-enable autoneg.
 */
5248 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5250 if (tp->serdes_counter) {
5251 /* Give autoneg time to complete. */
5252 tp->serdes_counter--;
5256 if (!netif_carrier_ok(tp->dev) &&
5257 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5260 tg3_readphy(tp, MII_BMCR, &bmcr);
5261 if (bmcr & BMCR_ANENABLE) {
5264 /* Select shadow register 0x1f */
5265 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5266 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5268 /* Select expansion interrupt status register */
5269 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5270 MII_TG3_DSP_EXP1_INT_STAT);
5271 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5272 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5274 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5275 /* We have signal detect and not receiving
5276 * config code words, link is up by parallel
5280 bmcr &= ~BMCR_ANENABLE;
5281 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5282 tg3_writephy(tp, MII_BMCR, bmcr);
5283 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5286 } else if (netif_carrier_ok(tp->dev) &&
5287 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5288 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5291 /* Select expansion interrupt status register */
5292 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5293 MII_TG3_DSP_EXP1_INT_STAT);
5294 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5298 /* Config code words received, turn on autoneg. */
5299 tg3_readphy(tp, MII_BMCR, &bmcr);
5300 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5302 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
/* Dispatch link setup to the correct per-media handler, then apply
 * chip-wide fixups that depend on the resolved link: 5784_AX clock
 * prescaler, TX slot time for half-duplex gigabit, statistics
 * coalescing ticks, and the ASPM L1 entry threshold workaround.
 */
5308 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5313 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5314 err = tg3_setup_fiber_phy(tp, force_reset);
5315 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5316 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5318 err = tg3_setup_copper_phy(tp, force_reset);
/* 5784_AX: scale the GRC timer prescaler to the MAC clock rate. */
5320 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5323 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5324 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5326 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5331 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5332 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5333 tw32(GRC_MISC_CFG, val);
5336 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5337 (6 << TX_LENGTHS_IPG_SHIFT);
5338 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5339 val |= tr32(MAC_TX_LENGTHS) &
5340 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5341 TX_LENGTHS_CNT_DWN_VAL_MSK);
/* Half-duplex gigabit needs the longer (0xff) slot time. */
5343 if (tp->link_config.active_speed == SPEED_1000 &&
5344 tp->link_config.active_duplex == DUPLEX_HALF)
5345 tw32(MAC_TX_LENGTHS, val |
5346 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5348 tw32(MAC_TX_LENGTHS, val |
5349 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5351 if (!tg3_flag(tp, 5705_PLUS)) {
5352 if (netif_carrier_ok(tp->dev)) {
5353 tw32(HOSTCC_STAT_COAL_TICKS,
5354 tp->coal.stats_block_coalesce_usecs);
5356 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5360 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5361 val = tr32(PCIE_PWR_MGMT_THRESH);
5362 if (!netif_carrier_ok(tp->dev))
5363 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5366 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5367 tw32(PCIE_PWR_MGMT_THRESH, val);
/* Returns nonzero while interrupt handling is being quiesced
 * (see tg3_irq_quiesce); ISRs test this before scheduling NAPI. */
static inline int tg3_irq_sync(struct tg3 *tp)
	return tp->irq_sync;
/* Copy @len bytes of registers starting at @off into the dump buffer
 * @dst, one 32-bit word at a time. */
static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
	/* Bias dst by the register offset so each word lands at the
	 * buffer position that matches its register address. */
	dst = (u32 *)((u8 *)dst + off);
	for (i = 0; i < len; i += sizeof(u32))
		*dst++ = tr32(off + i);
/* Snapshot the legacy (non-PCIe) register blocks into @regs for a
 * debug dump.  Each tg3_rd32_loop() call covers one functional block;
 * the second argument is the block's base offset, the third its
 * length in bytes. */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers only exist with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* Older chips have a separate TX CPU to dump as well. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
/* Emit a full debug dump of chip registers plus the software and
 * hardware status-block state for every NAPI instance.  Called from
 * error paths; uses GFP_ATOMIC since it may run in softirq context. */
static void tg3_dump_state(struct tg3 *tp)
	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
		tg3_dump_legacy_regs(tp, regs);

	/* Print four words per line, skipping all-zero groups to keep
	 * the log compact. */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
			   "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		/* Driver-side NAPI bookkeeping for the same vector. */
			   "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later.
 */
static void tg3_tx_recover(struct tg3 *tp)
	/* Sanity: the reorder workaround must not already be active. */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* Flag the pending recovery under tp->lock; the actual chip
	 * reset happens later from the reset task. */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
/* Number of free TX descriptors on this ring (pending minus in-flight,
 * with ring-index wraparound handled by the mask). */
static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
	/* Tell compiler to fetch tx indices from memory. */
	/* NOTE(review): a compiler barrier presumably sits here in the
	 * full source — elided from this view; confirm upstream. */
	return tnapi->tx_pending -
	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
/* Tigon3 never reports partial packet sends. So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
	struct tg3 *tp = tnapi->tp;
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	if (tg3_flag(tp, ENABLE_TSS))

	txq = netdev_get_tx_queue(tp->dev, index);

	/* Walk completed entries from sw_idx up to the hardware's
	 * consumer index, unmapping and freeing each skb. */
	while (sw_idx != hw_idx) {
		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;

		/* A NULL skb here indicates a bogus completion — see
		 * tg3_tx_recover() for the MMIO-reordering suspicion. */
		if (unlikely(skb == NULL)) {

		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),

		/* Skip workaround entries produced by tg3_tx_frag_set()
		 * when a buffer had to be split for the DMA limit. */
		while (ri->fragmented) {
			ri->fragmented = false;
			sw_idx = NEXT_TX(sw_idx);
			ri = &tnapi->tx_buffers[sw_idx];

		sw_idx = NEXT_TX(sw_idx);

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			/* Fragment entries must carry no skb and must not
			 * run past the hardware consumer index. */
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_frag_size(&skb_shinfo(skb)->frags[i]),

			while (ri->fragmented) {
				ri->fragmented = false;
				sw_idx = NEXT_TX(sw_idx);
				ri = &tnapi->tx_buffers[sw_idx];

			sw_idx = NEXT_TX(sw_idx);

		bytes_compl += skb->len;

	/* Suspected MMIO reordering: schedule a chip reset. */
	if (unlikely(tx_bug)) {

	netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */

	/* Re-check under the tx lock to close the race with a
	 * concurrent tg3_start_xmit() stopping the queue. */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
/* Unmap and release one RX data buffer described by @ri; @map_sz is
 * the size the buffer was originally DMA-mapped with. */
static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
			 map_sz, PCI_DMA_FROMDEVICE);
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful asymmetry of cpu vs. chip accesses. For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address). Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			     u32 opaque_key, u32 dest_idx_unmasked)
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	int skb_size, data_size, dest_idx;

	/* Resolve descriptor, ring_info slot and buffer size for the
	 * targeted producer ring (standard vs. jumbo). */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		data_size = tp->rx_pkt_map_sz;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		data_size = TG3_RX_JMB_MAP_SZ;

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	/* Size the raw buffer so build_skb() can append its
	 * skb_shared_info after the data area. */
	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc(skb_size, GFP_ATOMIC);

	mapping = pci_map_single(tp->pdev,
				 data + TG3_RX_OFFSET(tp),
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(tp->pdev, mapping)) {

	/* Commit: record the mapping and publish the DMA address in
	 * the descriptor (only the address fields are chip-visible). */
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant. See notes above
 * tg3_alloc_rx_data for full details.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Recycle always sources from napi[0]'s producer ring set. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];

	/* Move the buffer pointer, DMA cookie and descriptor address
	 * from the source slot to the destination slot. */
	dest_map->data = src_map->data;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */

	src_map->data = NULL;
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host. The chip does not write into the original descriptor the
 * RX buffer was obtained from. The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
 * it is first placed into the on-chip ram. When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective. If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */

	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	/* Consume status-ring entries up to hw_idx, bounded by the
	 * NAPI budget. */
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies the producer ring and
		 * the slot the buffer came from. */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			post_ptr = &std_prod_idx;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			post_ptr = &jmb_prod_idx;
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Hardware-flagged receive error: recycle the buffer
		 * back to the producer ring and drop the packet. */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
			/* Other statistics kept track of by card. */

		prefetch(data + TG3_RX_OFFSET(tp));
		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -

		/* Large packet: hand the buffer itself to the stack and
		 * post a freshly allocated replacement. */
		if (len > TG3_RX_COPY_THRESH(tp)) {
			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			skb = build_skb(data);
				goto drop_it_no_recycle;
			skb_reserve(skb, TG3_RX_OFFSET(tp));
			/* Ensure that the update to the data happens
			 * after the usage of the old DMA mapping.
			 */
		/* Small packet: copy into a new skb and recycle the
		 * original buffer back to the producer ring. */
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			skb = netdev_alloc_skb(tp->dev,
					       len + TG3_RAW_IP_ALIGN);
				goto drop_it_no_recycle;

			skb_reserve(skb, TG3_RAW_IP_ALIGN);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
				data + TG3_RX_OFFSET(tp),
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

		/* Chip-verified TCP/UDP checksum (0xffff == good). */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames unless VLAN-tagged. */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			goto drop_it_no_recycle;

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		/* Periodically flush the standard producer index so the
		 * chip does not starve for buffers mid-loop. */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;

		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* With RSS, napi[1] owns the actual mailbox refill —
		 * kick it if we are a different vector. */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
/* Check the status block for a link-change event and, if one is
 * pending, acknowledge it and rerun PHY setup under tp->lock. */
static void tg3_poll_link(struct tg3 *tp)
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Clear the link-change bit while keeping the
			 * status block marked updated. */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* Ack all MAC status change sources. */
				 (MAC_STATUS_SYNC_CHANGED |
				  MAC_STATUS_CFG_CHANGED |
				  MAC_STATUS_MI_COMPLETION |
				  MAC_STATUS_LNKSTATE_CHANGED));

				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
/* Transfer recycled RX buffers from a source producer-ring set @spr
 * (a per-vector ring) back to the destination set @dpr (napi[0]'s
 * ring, which feeds the chip under RSS).  Copies as many contiguous
 * entries as possible without overwriting destination slots that are
 * still occupied.  Returns an error indication accumulated by the
 * caller's loop (accumulator declaration elided from this view). */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
	u32 si, di, cpycnt, src_prod_idx;

		/* --- standard ring transfer --- */
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */

		if (spr->rx_std_cons_idx == src_prod_idx)

		/* Contiguous run length, accounting for ring wrap. */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Shrink the run if a destination slot still holds data. */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].data) {

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;

		/* --- jumbo ring transfer (same algorithm) --- */
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */

		if (spr->rx_jmb_cons_idx == src_prod_idx)

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].data) {

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
/* Per-vector NAPI work: run TX completion, then RX within the budget,
 * and — for the RSS master vector (napi[1]) — drain recycled buffers
 * from the other vectors back into napi[0]'s producer rings.
 * Returns the updated work_done count. */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		/* Remember pre-transfer indices so we only touch the
		 * mailboxes that actually changed. */
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

			/* Transfer error: force an interrupt to retry. */
			tw32_f(HOSTCC_MODE, tp->coal_now);
/* Schedule the chip-reset work item exactly once; the atomic
 * test_and_set_bit prevents double-queueing. */
static inline void tg3_reset_task_schedule(struct tg3 *tp)
	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
		schedule_work(&tp->reset_task);
/* Cancel any queued reset task (waiting for a running instance to
 * finish) and clear both recovery-related flags. */
static inline void tg3_reset_task_cancel(struct tg3 *tp)
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, RESET_TASK_PENDING);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
/* NAPI poll handler for MSI-X vectors (tagged status model): loop on
 * tg3_poll_work() until the budget is exhausted or no TX/RX work
 * remains, then complete NAPI and re-arm the vector's interrupt
 * mailbox with the latest status tag. */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

		work_done = tg3_poll_work(tnapi, work_done, budget);

		/* A pending TX recovery aborts polling into the reset path. */
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))

		if (unlikely(work_done >= budget))

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);

	/* Error path: work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
/* Inspect the hardware error-attention registers after the status
 * block flagged SD_STATUS_ERROR.  If a real error is found, dump the
 * chip state, mark the error handled, and schedule a chip reset. */
static void tg3_process_error(struct tg3 *tp)
	bool real_error = false;

	/* Only process each error condition once per reset cycle. */
	if (tg3_flag(tp, ERROR_PROCESSED))

	/* Check Flow Attention register */
	val = tr32(HOSTCC_FLOW_ATTN);
	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
		netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");

	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
		netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");

	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
		netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");

	tg3_flag_set(tp, ERROR_PROCESSED);
	tg3_reset_task_schedule(tp);
/* NAPI poll handler for the default (vector 0 / INTx / MSI) path.
 * Handles hardware error attention, loops on tg3_poll_work() within
 * the budget, then completes NAPI and re-enables interrupts when no
 * work remains. */
static int tg3_poll(struct napi_struct *napi, int budget)
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))

		if (unlikely(work_done >= budget))

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			/* Non-tagged model: just clear the updated bit. */
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);

	/* Error path: work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	tg3_reset_task_schedule(tp);
/* Disable all NAPI instances, in reverse order of enabling. */
static void tg3_napi_disable(struct tg3 *tp)
	for (i = tp->irq_cnt - 1; i >= 0; i--)
		napi_disable(&tp->napi[i].napi);
/* Enable all NAPI instances. */
static void tg3_napi_enable(struct tg3 *tp)
	for (i = 0; i < tp->irq_cnt; i++)
		napi_enable(&tp->napi[i].napi);
/* Register NAPI contexts: vector 0 uses tg3_poll, the remaining MSI-X
 * vectors use tg3_poll_msix; weight 64 for all. */
static void tg3_napi_init(struct tg3 *tp)
	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
	for (i = 1; i < tp->irq_cnt; i++)
		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
/* Unregister all NAPI contexts. */
static void tg3_napi_fini(struct tg3 *tp)
	for (i = 0; i < tp->irq_cnt; i++)
		netif_napi_del(&tp->napi[i].napi);
/* Quiesce the data path: refresh trans_start to avoid a spurious TX
 * watchdog, disable NAPI, then stop the TX queues. */
static inline void tg3_netif_stop(struct tg3 *tp)
	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
/* Restart the data path after tg3_netif_stop(): wake TX queues,
 * re-enable NAPI, force a status-block update and unmask interrupts. */
static inline void tg3_netif_start(struct tg3 *tp)
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block updated so the first poll runs. */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
/* Quiesce interrupt handling: must not be nested (BUG_ON), and waits
 * for every vector's in-flight handler to finish. */
static void tg3_irq_quiesce(struct tg3 *tp)
	BUG_ON(tp->irq_sync);

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well. Most of the time, this is not necessary except when
 * shutting down the device.
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
	spin_lock_bh(&tp->lock);
		/* NOTE(review): presumably guarded by if (irq_sync) in the
		 * full source — the condition line is elided here. */
		tg3_irq_quiesce(tp);
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
	spin_unlock_bh(&tp->lock);
/* One-shot MSI handler - Chip automatically disables interrupt
 * after sending MSI so driver doesn't have to do it.
 */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	/* Warm the caches for the status block and next RX entry. */
	prefetch(tnapi->hw_status);
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	/* Skip scheduling while interrupts are being quiesced. */
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);
/* MSI ISR - No need to check for interrupt sharing and no need to
 * flush status block and interrupt mailbox. PCI ordering rules
 * guarantee that MSI will arrive after the status block.
 */
static irqreturn_t tg3_msi(int irq, void *dev_id)
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;

	prefetch(tnapi->hw_status);
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 */
	tw32_mailbox(tnapi->int_mbox, 0x00000001);
	if (likely(!tg3_irq_sync(tp)))
		napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(1);
/* Legacy INTx interrupt handler (non-tagged status model).
 * Confirms the interrupt is ours, masks further chip interrupts via
 * the mailbox, and schedules NAPI if there is work. */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {

	/*
	 * Writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * Writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
	if (tg3_irq_sync(tp))
	sblk->status &= ~SD_STATUS_UPDATED;
	if (likely(tg3_has_work(tnapi))) {
		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
		napi_schedule(&tnapi->napi);
		/* No work, shared interrupt perhaps? re-enable
		 * interrupts, and flush that PCI write
		 */
		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,

	return IRQ_RETVAL(handled);
/* Legacy INTx interrupt handler for the tagged-status model: uses the
 * status tag rather than SD_STATUS_UPDATED to decide whether this
 * interrupt carries new work. */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int handled = 1;

	/* In INTx mode, it is possible for the interrupt to arrive at
	 * the CPU before the status block posted prior to the interrupt.
	 * Reading the PCI State register will confirm whether the
	 * interrupt is ours and will flush the status block.
	 */
	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
		if (tg3_flag(tp, CHIP_RESETTING) ||
		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {

	/*
	 * writing any value to intr-mbox-0 clears PCI INTA# and
	 * chip-internal interrupt pending events.
	 * writing non-zero to intr-mbox-0 additional tells the
	 * NIC to stop sending us irqs, engaging "in-intr-handler"
	 *
	 * Flush the mailbox to de-assert the IRQ immediately to prevent
	 * spurious interrupts. The flush impacts performance but
	 * excessive spurious interrupts can be worse in some cases.
	 */
	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

	/*
	 * In a shared interrupt configuration, sometimes other devices'
	 * interrupts will scream. We record the current status tag here
	 * so that the above check can report that the screaming interrupts
	 * are unhandled. Eventually they will be silenced.
	 */
	tnapi->last_irq_tag = sblk->status_tag;

	if (tg3_irq_sync(tp))

	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

	napi_schedule(&tnapi->napi);

	return IRQ_RETVAL(handled);
/* ISR for interrupt test */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
	struct tg3_napi *tnapi = dev_id;
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	/* Claim the interrupt if the status block was updated or the
	 * PCI state register shows our interrupt line active; disable
	 * interrupts so the test fires exactly once. */
	if ((sblk->status & SD_STATUS_UPDATED) ||
	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
		tg3_disable_ints(tp);
		return IRQ_RETVAL(1);
	return IRQ_RETVAL(0);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: synthesize an interrupt on every vector so netconsole
 * and friends can make progress with interrupts disabled. */
static void tg3_poll_controller(struct net_device *dev)
	struct tg3 *tp = netdev_priv(dev);

	for (i = 0; i < tp->irq_cnt; i++)
		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
/* TX watchdog callback: log (and optionally dump state), then kick
 * off the chip-reset work item. */
static void tg3_tx_timeout(struct net_device *dev)
	struct tg3 *tp = netdev_priv(dev);

	if (netif_msg_tx_err(tp)) {
		netdev_err(dev, "transmit timed out, resetting\n");

	tg3_reset_task_schedule(tp);
/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
	u32 base = (u32) mapping & 0xffffffff;

	/* base + len + 8 < base detects 32-bit wraparound; the 8-byte
	 * slack and the 0xffffdcc0 floor match the chip's DMA quirks. */
	return (base > 0xffffdcc0) && (base + len + 8 < base);
/* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
	/* Only relevant on 64-bit highmem configs with the 40-bit DMA
	 * hardware bug present. */
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
	if (tg3_flag(tp, 40BIT_DMA_BUG))
		return ((u64) mapping + len) > DMA_BIT_MASK(40);
/* Fill one TX buffer descriptor: split the 64-bit DMA address into
 * hi/lo words, pack length with flags, and mss with the vlan tag. */
static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
				 dma_addr_t mapping, u32 len, u32 flags,
	txbd->addr_hi = ((u64) mapping >> 32);
	txbd->addr_lo = ((u64) mapping & 0xffffffff);
	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
/* Place one TX buffer onto the ring at *entry, splitting it into
 * multiple BDs when tp->dma_limit requires, and flagging buffers that
 * trip the 4GB-crossing, 40-bit, or short-DMA hardware bugs so the
 * caller can fall back to the workaround path.  Advances *entry and
 * consumes *budget; returns a hit-a-hwbug indication (return value
 * expression elided from this view). */
static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
			    dma_addr_t map, u32 len, u32 flags,
	struct tg3 *tp = tnapi->tp;

	/* Hardware DMA bug screens. */
	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)

	if (tg3_4g_overflow_test(map, len))

	if (tg3_40bit_overflow_test(tp, map, len))

	if (tp->dma_limit) {
		u32 prvidx = *entry;
		u32 tmp_flag = flags & ~TXD_FLAG_END;
		/* Chop the buffer into dma_limit-sized BDs. */
		while (len > tp->dma_limit && *budget) {
			u32 frag_len = tp->dma_limit;
			len -= tp->dma_limit;

			/* Avoid the 8byte DMA problem */
				len += tp->dma_limit / 2;
				frag_len = tp->dma_limit / 2;

			/* Mark split pieces so completion/unmap paths can
			 * skip the extra entries. */
			tnapi->tx_buffers[*entry].fragmented = true;

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      frag_len, tmp_flag, mss, vlan);
			*entry = NEXT_TX(*entry);

			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
				      len, flags, mss, vlan);
			*entry = NEXT_TX(*entry);
			/* Ran out of budget: unmark the last split entry. */
			tnapi->tx_buffers[prvidx].fragmented = false;

		/* Single BD covers the whole buffer. */
		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
			      len, flags, mss, vlan);
		*entry = NEXT_TX(*entry);
/* Undo the DMA mappings for one transmitted skb starting at ring slot
 * 'entry': unmap the linear head, then frags 0..last, skipping over
 * any extra descriptors that tg3_tx_frag_set() marked 'fragmented'.
 * 'last' == -1 means the head only (no page frags to unmap).
 */
6653 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6656 struct sk_buff *skb;
6657 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6662 pci_unmap_single(tnapi->tp->pdev,
6663 dma_unmap_addr(txb, mapping),
/* Skip the extra descriptors a split head consumed. */
6667 while (txb->fragmented) {
6668 txb->fragmented = false;
6669 entry = NEXT_TX(entry);
6670 txb = &tnapi->tx_buffers[entry];
6673 for (i = 0; i <= last; i++) {
6674 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6676 entry = NEXT_TX(entry);
6677 txb = &tnapi->tx_buffers[entry];
6679 pci_unmap_page(tnapi->tp->pdev,
6680 dma_unmap_addr(txb, mapping),
6681 skb_frag_size(frag), PCI_DMA_TODEVICE);
/* Likewise skip split pieces belonging to this frag. */
6683 while (txb->fragmented) {
6684 txb->fragmented = false;
6685 entry = NEXT_TX(entry);
6686 txb = &tnapi->tx_buffers[entry];
6691 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Bounce-copy path: linearize the skb into a freshly allocated copy
 * (realigned on 5701 to dodge its alignment erratum), map the copy,
 * and queue it via tg3_tx_frag_set().  On success *pskb is replaced
 * by the new skb (caller frees the original); on failure the copy is
 * freed and an (elided) error is returned.
 */
6692 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6693 struct sk_buff **pskb,
6694 u32 *entry, u32 *budget,
6695 u32 base_flags, u32 mss, u32 vlan)
6697 struct tg3 *tp = tnapi->tp;
6698 struct sk_buff *new_skb, *skb = *pskb;
6699 dma_addr_t new_addr = 0;
6702 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6703 new_skb = skb_copy(skb, GFP_ATOMIC);
/* 5701: copy with extra headroom so skb->data becomes 4-byte aligned. */
6705 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6707 new_skb = skb_copy_expand(skb,
6708 skb_headroom(skb) + more_headroom,
6709 skb_tailroom(skb), GFP_ATOMIC);
6715 /* New SKB is guaranteed to be linear. */
6716 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6718 /* Make sure the mapping succeeded */
6719 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6720 dev_kfree_skb(new_skb);
6723 u32 save_entry = *entry;
6725 base_flags |= TXD_FLAG_END;
6727 tnapi->tx_buffers[*entry].skb = new_skb;
6728 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
/* If even the linear copy trips a hw bug, unmap and give up. */
6731 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6732 new_skb->len, base_flags,
6734 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6735 dev_kfree_skb(new_skb);
6746 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6748 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6749 * TSO header is greater than 80 bytes.
/* Software-segment the skb with GSO and transmit each resulting
 * segment through tg3_start_xmit().  Stops the queue first if the
 * worst-case descriptor estimate does not fit, returning
 * NETDEV_TX_BUSY so the stack requeues the skb.
 */
6751 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6753 struct sk_buff *segs, *nskb;
/* Worst case: ~3 descriptors per segment (header + up to 2 pieces). */
6754 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6756 /* Estimate the number of fragments in the worst case */
6757 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6758 netif_stop_queue(tp->dev);
6760 /* netif_tx_stop_queue() must be done before checking
6761 * checking tx index in tg3_tx_avail() below, because in
6762 * tg3_tx(), we update tx index before checking for
6763 * netif_tx_queue_stopped().
6766 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6767 return NETDEV_TX_BUSY;
6769 netif_wake_queue(tp->dev);
/* Segment with TSO masked off so the stack builds MTU-sized skbs. */
6772 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6774 goto tg3_tso_bug_end;
6780 tg3_start_xmit(nskb, tp->dev);
6786 return NETDEV_TX_OK;
6789 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6790 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
/* Main transmit routine.  Maps the skb head and frags, builds tx
 * descriptors (via tg3_tx_frag_set), applies the TSO/checksum/VLAN
 * descriptor flags, and falls back to the DMA-hwbug bounce path when
 * a mapping would trip a hardware erratum.  Runs with BHs disabled
 * under netif_tx_lock (see comment below).
 */
6792 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6794 struct tg3 *tp = netdev_priv(dev);
6795 u32 len, entry, base_flags, mss, vlan = 0;
6797 int i = -1, would_hit_hwbug;
6799 struct tg3_napi *tnapi;
6800 struct netdev_queue *txq;
6803 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6804 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
/* With TSS, vector 0 takes no tx work; (elided) tnapi is offset by 1. */
6805 if (tg3_flag(tp, ENABLE_TSS))
6808 budget = tg3_tx_avail(tnapi);
6810 /* We are running in BH disabled context with netif_tx_lock
6811 * and TX reclaim runs via tp->napi.poll inside of a software
6812 * interrupt. Furthermore, IRQ processing runs lockless so we have
6813 * no IRQ context deadlocks to worry about either. Rejoice!
6815 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6816 if (!netif_tx_queue_stopped(txq)) {
6817 netif_tx_stop_queue(txq);
6819 /* This is a hard error, log it. */
6821 "BUG! Tx Ring full when queue awake!\n");
6823 return NETDEV_TX_BUSY;
6826 entry = tnapi->tx_prod;
6828 if (skb->ip_summed == CHECKSUM_PARTIAL)
6829 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6831 mss = skb_shinfo(skb)->gso_size;
/* mss != 0 => TSO packet: fix up headers and descriptor flags. */
6834 u32 tcp_opt_len, hdr_len;
6836 if (skb_header_cloned(skb) &&
6837 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6841 tcp_opt_len = tcp_optlen(skb);
6843 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6845 if (!skb_is_gso_v6(skb)) {
6847 iph->tot_len = htons(mss + hdr_len);
/* Headers over 80 bytes trip a TSO erratum: segment in software. */
6850 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6851 tg3_flag(tp, TSO_BUG))
6852 return tg3_tso_bug(tp, skb);
6854 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6855 TXD_FLAG_CPU_POST_DMA);
6857 if (tg3_flag(tp, HW_TSO_1) ||
6858 tg3_flag(tp, HW_TSO_2) ||
6859 tg3_flag(tp, HW_TSO_3)) {
6860 tcp_hdr(skb)->check = 0;
6861 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
/* Firmware TSO: pre-seed the TCP pseudo-header checksum. */
6863 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
/* Encode the header length into mss/base_flags in the format the
 * particular HW_TSO generation expects. */
6868 if (tg3_flag(tp, HW_TSO_3)) {
6869 mss |= (hdr_len & 0xc) << 12;
6871 base_flags |= 0x00000010;
6872 base_flags |= (hdr_len & 0x3e0) << 5;
6873 } else if (tg3_flag(tp, HW_TSO_2))
6874 mss |= hdr_len << 9;
6875 else if (tg3_flag(tp, HW_TSO_1) ||
6876 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6877 if (tcp_opt_len || iph->ihl > 5) {
6880 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6881 mss |= (tsflags << 11);
6884 if (tcp_opt_len || iph->ihl > 5) {
6887 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6888 base_flags |= tsflags << 12;
6893 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6894 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6895 base_flags |= TXD_FLAG_JMB_PKT;
6897 if (vlan_tx_tag_present(skb)) {
6898 base_flags |= TXD_FLAG_VLAN;
6899 vlan = vlan_tx_tag_get(skb);
6902 len = skb_headlen(skb);
6904 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6905 if (pci_dma_mapping_error(tp->pdev, mapping))
6909 tnapi->tx_buffers[entry].skb = skb;
6910 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6912 would_hit_hwbug = 0;
6914 if (tg3_flag(tp, 5701_DMA_BUG))
6915 would_hit_hwbug = 1;
6917 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6918 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6920 would_hit_hwbug = 1;
6921 } else if (skb_shinfo(skb)->nr_frags > 0) {
/* Non-HW-TSO parts: (elided) mss cleared for frag descriptors. */
6924 if (!tg3_flag(tp, HW_TSO_1) &&
6925 !tg3_flag(tp, HW_TSO_2) &&
6926 !tg3_flag(tp, HW_TSO_3))
6929 /* Now loop through additional data
6930 * fragments, and queue them.
6932 last = skb_shinfo(skb)->nr_frags - 1;
6933 for (i = 0; i <= last; i++) {
6934 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6936 len = skb_frag_size(frag);
6937 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6938 len, DMA_TO_DEVICE);
6940 tnapi->tx_buffers[entry].skb = NULL;
6941 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6943 if (dma_mapping_error(&tp->pdev->dev, mapping))
6947 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6949 ((i == last) ? TXD_FLAG_END : 0),
6951 would_hit_hwbug = 1;
/* Some mapping tripped a DMA erratum: unwind and retry through the
 * bounce-copy workaround. */
6957 if (would_hit_hwbug) {
6958 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6960 /* If the workaround fails due to memory/mapping
6961 * failure, silently drop this packet.
6963 entry = tnapi->tx_prod;
6964 budget = tg3_tx_avail(tnapi);
6965 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6966 base_flags, mss, vlan))
6970 skb_tx_timestamp(skb);
6971 netdev_sent_queue(tp->dev, skb->len);
6973 /* Packets are ready, update Tx producer idx local and on card. */
6974 tw32_tx_mbox(tnapi->prodmbox, entry);
6976 tnapi->tx_prod = entry;
6977 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6978 netif_tx_stop_queue(txq);
6980 /* netif_tx_stop_queue() must be done before checking
6981 * checking tx index in tg3_tx_avail() below, because in
6982 * tg3_tx(), we update tx index before checking for
6983 * netif_tx_queue_stopped().
6986 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6987 netif_tx_wake_queue(txq);
6991 return NETDEV_TX_OK;
/* (elided) error labels: unmap what was queued, drop, count the drop. */
6994 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
6995 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7000 return NETDEV_TX_OK;
/* Enable/disable internal MAC loopback by rewriting tp->mac_mode:
 * select the port mode (MII for 10/100-only PHYs, else GMII), set or
 * clear the internal-loopback bit, and adjust link polarity per chip
 * generation.  Writes the result to the MAC_MODE register.
 */
7003 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7006 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7007 MAC_MODE_PORT_MODE_MASK);
7009 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7011 if (!tg3_flag(tp, 5705_PLUS))
7012 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7014 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7015 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7017 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
/* Disable path: clear loopback and fix polarity on affected chips. */
7019 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7021 if (tg3_flag(tp, 5705_PLUS) ||
7022 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7023 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7024 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7027 tw32(MAC_MODE, tp->mac_mode);
/* Put the PHY into loopback at the requested speed (optionally
 * external loopback), then program the MAC port mode to match.
 * Handles FET-class PHYs specially (PTEST trim bits, forced tx link)
 * and applies per-chip polarity/reset quirks.
 */
7031 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7033 u32 val, bmcr, mac_mode, ptest = 0;
7035 tg3_phy_toggle_apd(tp, false);
7036 tg3_phy_toggle_automdix(tp, 0);
7038 if (extlpbk && tg3_phy_set_extloopbk(tp))
/* Build the BMCR value: full duplex plus the requested speed. */
7041 bmcr = BMCR_FULLDPLX;
7046 bmcr |= BMCR_SPEED100;
7050 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7052 bmcr |= BMCR_SPEED100;
7055 bmcr |= BMCR_SPEED1000;
/* 1000 Mbps: force master role on non-FET PHYs; FET PHYs use the
 * PTEST trim-select mechanism instead. */
7060 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7061 tg3_readphy(tp, MII_CTRL1000, &val);
7062 val |= CTL1000_AS_MASTER |
7063 CTL1000_ENABLE_MASTER;
7064 tg3_writephy(tp, MII_CTRL1000, val);
7066 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7067 MII_TG3_FET_PTEST_TRIM_2;
7068 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7071 bmcr |= BMCR_LOOPBACK;
7073 tg3_writephy(tp, MII_BMCR, bmcr);
7075 /* The write needs to be flushed for the FETs */
7076 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7077 tg3_readphy(tp, MII_BMCR, &bmcr);
7081 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7082 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7083 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7084 MII_TG3_FET_PTEST_FRC_TX_LINK |
7085 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7087 /* The write needs to be flushed for the AC131 */
7088 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7091 /* Reset to prevent losing 1st rx packet intermittently */
7092 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7093 tg3_flag(tp, 5780_CLASS)) {
7094 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7096 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* MAC port mode must agree with the PHY loopback speed. */
7099 mac_mode = tp->mac_mode &
7100 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7101 if (speed == SPEED_1000)
7102 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7104 mac_mode |= MAC_MODE_PORT_MODE_MII;
7106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7107 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7109 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7110 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7111 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7112 mac_mode |= MAC_MODE_LINK_POLARITY;
7114 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7115 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7118 tw32(MAC_MODE, mac_mode);
/* Apply the NETIF_F_LOOPBACK feature toggle: enable or disable
 * internal MAC loopback under tp->lock.  Early-exits when the
 * requested state already matches tp->mac_mode.
 */
7124 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7126 struct tg3 *tp = netdev_priv(dev);
7128 if (features & NETIF_F_LOOPBACK) {
7129 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7132 spin_lock_bh(&tp->lock);
7133 tg3_mac_loopback(tp, true);
/* Loopback implies link-up so traffic can flow immediately. */
7134 netif_carrier_on(tp->dev);
7135 spin_unlock_bh(&tp->lock);
7136 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7138 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7141 spin_lock_bh(&tp->lock);
7142 tg3_mac_loopback(tp, false);
7143 /* Force link status check */
7144 tg3_setup_phy(tp, 1);
7145 spin_unlock_bh(&tp->lock);
7146 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
/* ndo_fix_features hook: 5780-class chips cannot do TSO with jumbo
 * MTU, so strip all TSO feature bits in that configuration.
 */
7150 static netdev_features_t tg3_fix_features(struct net_device *dev,
7151 netdev_features_t features)
7153 struct tg3 *tp = netdev_priv(dev);
7155 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7156 features &= ~NETIF_F_ALL_TSO;
/* ndo_set_features hook: only NETIF_F_LOOPBACK needs action here,
 * and only while the interface is running.
 */
7162 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7163 netdev_features_t changed = dev->features ^ features;
7165 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7166 tg3_set_loopback(dev, features);
/* Free all rx data buffers owned by a producer ring set.  For
 * per-vector rings (not napi[0]'s) only the cons..prod window holds
 * live buffers; for the primary set every slot is walked.
 */
7171 static void tg3_rx_prodring_free(struct tg3 *tp,
7172 struct tg3_rx_prodring_set *tpr)
7176 if (tpr != &tp->napi[0].prodring) {
7177 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7178 i = (i + 1) & tp->rx_std_ring_mask)
7179 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7182 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7183 for (i = tpr->rx_jmb_cons_idx;
7184 i != tpr->rx_jmb_prod_idx;
7185 i = (i + 1) & tp->rx_jmb_ring_mask) {
7186 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
/* Primary ring set: free every slot unconditionally. */
7194 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7195 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7198 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7199 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7200 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7205 /* Initialize rx rings for packet processing.
7207 * The chip has been shut down and the driver detached from
7208 * the networking, so no interrupts or new tx packets will
7209 * end up in the driver. tp->{tx,}lock are held and thus
/* Reset producer/consumer indices, write the invariant descriptor
 * fields (length, flags, opaque slot id), and populate the standard
 * and (when enabled) jumbo rings with fresh data buffers.  Shrinks
 * rx_pending / rx_jumbo_pending if allocation falls short; frees
 * everything via tg3_rx_prodring_free() on the (elided) error path.
 */
7212 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7213 struct tg3_rx_prodring_set *tpr)
7215 u32 i, rx_pkt_dma_sz;
7217 tpr->rx_std_cons_idx = 0;
7218 tpr->rx_std_prod_idx = 0;
7219 tpr->rx_jmb_cons_idx = 0;
7220 tpr->rx_jmb_prod_idx = 0;
/* Secondary (per-vector) sets only mirror napi[0]'s descriptors:
 * just clear their buffer bookkeeping and return early (elided). */
7222 if (tpr != &tp->napi[0].prodring) {
7223 memset(&tpr->rx_std_buffers[0], 0,
7224 TG3_RX_STD_BUFF_RING_SIZE(tp));
7225 if (tpr->rx_jmb_buffers)
7226 memset(&tpr->rx_jmb_buffers[0], 0,
7227 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7231 /* Zero out all descriptors. */
7232 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7234 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7235 if (tg3_flag(tp, 5780_CLASS) &&
7236 tp->dev->mtu > ETH_DATA_LEN)
7237 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7238 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7240 /* Initialize invariants of the rings, we only set this
7241 * stuff once. This works because the card does not
7242 * write into the rx buffer posting rings.
7244 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7245 struct tg3_rx_buffer_desc *rxd;
7247 rxd = &tpr->rx_std[i];
7248 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7249 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7250 rxd->opaque = (RXD_OPAQUE_RING_STD |
7251 (i << RXD_OPAQUE_INDEX_SHIFT));
7254 /* Now allocate fresh SKBs for each rx ring. */
7255 for (i = 0; i < tp->rx_pending; i++) {
7256 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7257 netdev_warn(tp->dev,
7258 "Using a smaller RX standard ring. Only "
7259 "%d out of %d buffers were allocated "
7260 "successfully\n", i, tp->rx_pending);
7268 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7271 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7273 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7276 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7277 struct tg3_rx_buffer_desc *rxd;
7279 rxd = &tpr->rx_jmb[i].std;
7280 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7281 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7283 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7284 (i << RXD_OPAQUE_INDEX_SHIFT));
7287 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7288 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7289 netdev_warn(tp->dev,
7290 "Using a smaller RX jumbo ring. Only %d "
7291 "out of %d buffers were allocated "
7292 "successfully\n", i, tp->rx_jumbo_pending);
7295 tp->rx_jumbo_pending = i;
/* (elided) error label: release everything allocated so far. */
7304 tg3_rx_prodring_free(tp, tpr);
/* Tear down a producer ring set: free the buffer bookkeeping arrays
 * and return the coherent descriptor rings to the DMA allocator.
 * Counterpart of tg3_rx_prodring_init().
 */
7308 static void tg3_rx_prodring_fini(struct tg3 *tp,
7309 struct tg3_rx_prodring_set *tpr)
7311 kfree(tpr->rx_std_buffers);
7312 tpr->rx_std_buffers = NULL;
7313 kfree(tpr->rx_jmb_buffers);
7314 tpr->rx_jmb_buffers = NULL;
7316 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7317 tpr->rx_std, tpr->rx_std_mapping);
7321 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7322 tpr->rx_jmb, tpr->rx_jmb_mapping);
/* Allocate a producer ring set: zeroed buffer bookkeeping arrays plus
 * coherent DMA descriptor rings (standard always, jumbo only on
 * jumbo-capable non-5780 chips).  On any failure the (elided) error
 * path calls tg3_rx_prodring_fini() to undo partial allocations.
 */
7327 static int tg3_rx_prodring_init(struct tg3 *tp,
7328 struct tg3_rx_prodring_set *tpr)
7330 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7332 if (!tpr->rx_std_buffers)
7335 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7336 TG3_RX_STD_RING_BYTES(tp),
7337 &tpr->rx_std_mapping,
7342 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7343 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7345 if (!tpr->rx_jmb_buffers)
7348 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7349 TG3_RX_JMB_RING_BYTES(tp),
7350 &tpr->rx_jmb_mapping,
/* (elided) error label: roll back with the fini routine. */
7359 tg3_rx_prodring_fini(tp, tpr);
7363 /* Free up pending packets in all rx/tx rings.
7365 * The chip has been shut down and the driver detached from
7366 * the networking, so no interrupts or new tx packets will
7367 * end up in the driver. tp->{tx,}lock is not held and we are not
7368 * in an interrupt context and thus may sleep.
7370 static void tg3_free_rings(struct tg3 *tp)
7374 for (j = 0; j < tp->irq_cnt; j++) {
7375 struct tg3_napi *tnapi = &tp->napi[j];
7377 tg3_rx_prodring_free(tp, &tnapi->prodring);
/* Vectors without a tx ring (TSS layouts) have no tx_buffers. */
7379 if (!tnapi->tx_buffers)
7382 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7383 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7388 tg3_tx_skb_unmap(tnapi, i,
7389 skb_shinfo(skb)->nr_frags - 1);
7391 dev_kfree_skb_any(skb);
/* Reset BQL accounting to match the now-empty rings. */
7394 netdev_reset_queue(tp->dev);
7397 /* Initialize tx/rx rings for packet processing.
7399 * The chip has been shut down and the driver detached from
7400 * the networking, so no interrupts or new tx packets will
7401 * end up in the driver. tp->{tx,}lock are held and thus
/* Frees any pending buffers, then zeroes per-vector status blocks,
 * tx rings, and rx return rings, and repopulates each vector's
 * producer ring set via tg3_rx_prodring_alloc().
 */
7404 static int tg3_init_rings(struct tg3 *tp)
7408 /* Free up all the SKBs. */
7411 for (i = 0; i < tp->irq_cnt; i++) {
7412 struct tg3_napi *tnapi = &tp->napi[i];
7414 tnapi->last_tag = 0;
7415 tnapi->last_irq_tag = 0;
7416 tnapi->hw_status->status = 0;
7417 tnapi->hw_status->status_tag = 0;
7418 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7423 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7425 tnapi->rx_rcb_ptr = 0;
7427 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
/* On allocation failure the (elided) path frees all rings. */
7429 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7439 * Must not be invoked with interrupt sources disabled and
7440 * the hardware shutdown down.
/* Release all coherent DMA memory and bookkeeping allocated by
 * tg3_alloc_consistent(): per-vector tx rings, tx buffer arrays,
 * rx return rings, producer ring sets, status blocks, and the
 * shared hardware statistics block.
 */
7442 static void tg3_free_consistent(struct tg3 *tp)
7446 for (i = 0; i < tp->irq_cnt; i++) {
7447 struct tg3_napi *tnapi = &tp->napi[i];
7449 if (tnapi->tx_ring) {
7450 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7451 tnapi->tx_ring, tnapi->tx_desc_mapping);
7452 tnapi->tx_ring = NULL;
7455 kfree(tnapi->tx_buffers);
7456 tnapi->tx_buffers = NULL;
7458 if (tnapi->rx_rcb) {
7459 dma_free_coherent(&tp->pdev->dev,
7460 TG3_RX_RCB_RING_BYTES(tp),
7462 tnapi->rx_rcb_mapping);
7463 tnapi->rx_rcb = NULL;
7466 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7468 if (tnapi->hw_status) {
7469 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7471 tnapi->status_mapping);
7472 tnapi->hw_status = NULL;
7477 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7478 tp->hw_stats, tp->stats_mapping);
7479 tp->hw_stats = NULL;
7484 * Must not be invoked with interrupt sources disabled and
7485 * the hardware shutdown down. Can sleep.
/* Allocate all coherent DMA memory the driver needs: the shared hw
 * stats block, and per vector a status block, producer ring set,
 * tx ring + buffer array (when the vector handles tx), and rx return
 * ring (when it handles rx).  Also points rx_rcb_prod_idx at the
 * status-block field that holds this vector's rx producer index.
 * On failure the (elided) error path calls tg3_free_consistent().
 */
7487 static int tg3_alloc_consistent(struct tg3 *tp)
7491 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7492 sizeof(struct tg3_hw_stats),
7498 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7500 for (i = 0; i < tp->irq_cnt; i++) {
7501 struct tg3_napi *tnapi = &tp->napi[i];
7502 struct tg3_hw_status *sblk;
7504 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7506 &tnapi->status_mapping,
7508 if (!tnapi->hw_status)
7511 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7512 sblk = tnapi->hw_status;
7514 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7517 /* If multivector TSS is enabled, vector 0 does not handle
7518 * tx interrupts. Don't allocate any resources for it.
7520 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7521 (i && tg3_flag(tp, ENABLE_TSS))) {
7522 tnapi->tx_buffers = kzalloc(
7523 sizeof(struct tg3_tx_ring_info) *
7524 TG3_TX_RING_SIZE, GFP_KERNEL);
7525 if (!tnapi->tx_buffers)
7528 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7530 &tnapi->tx_desc_mapping,
7532 if (!tnapi->tx_ring)
7537 * When RSS is enabled, the status block format changes
7538 * slightly. The "rx_jumbo_consumer", "reserved",
7539 * and "rx_mini_consumer" members get mapped to the
7540 * other three rx return ring producer indexes.
7544 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7547 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7550 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7553 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7558 * If multivector RSS is enabled, vector 0 does not handle
7559 * rx or tx interrupts. Don't allocate any resources for it.
7561 if (!i && tg3_flag(tp, ENABLE_RSS))
7564 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7565 TG3_RX_RCB_RING_BYTES(tp),
7566 &tnapi->rx_rcb_mapping,
7571 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
/* (elided) error label: unwind all prior allocations. */
7577 tg3_free_consistent(tp);
7581 #define MAX_WAIT_CNT 1000
7583 /* To stop a block, clear the enable bit and poll till it
7584 * clears. tp->lock is held.
/* Returns 0 when the enable bit clears in time; logs and returns an
 * (elided) error code on timeout unless 'silent' is set.  On
 * 5705-class chips some blocks cannot be toggled, so those report
 * success without polling.
 */
7586 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7591 if (tg3_flag(tp, 5705_PLUS)) {
7598 /* We can't enable/disable these bits of the
7599 * 5705/5750, just say success.
7612 for (i = 0; i < MAX_WAIT_CNT; i++) {
7615 if ((val & enable_bit) == 0)
7619 if (i == MAX_WAIT_CNT && !silent) {
7620 dev_err(&tp->pdev->dev,
7621 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7629 /* tp->lock is held. */
/* Quiesce the chip: disable interrupts, stop the rx path, then stop
 * every DMA/send/receive engine block in dependency order, disable
 * MAC tx, reset the FTQ, stop buffer manager and memory arbiter, and
 * finally clear all per-vector status blocks.  Accumulates (ORs) the
 * per-block error results into the returned value.
 */
7630 static int tg3_abort_hw(struct tg3 *tp, int silent)
7634 tg3_disable_ints(tp);
7636 tp->rx_mode &= ~RX_MODE_ENABLE;
7637 tw32_f(MAC_RX_MODE, tp->rx_mode);
/* Stop the receive-side engines first so no new work arrives. */
7640 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7641 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7642 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7643 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7644 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7645 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7647 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7648 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7649 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7650 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7651 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7652 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7653 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7655 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7656 tw32_f(MAC_MODE, tp->mac_mode);
7659 tp->tx_mode &= ~TX_MODE_ENABLE;
7660 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* MAC tx has no stop_block helper; poll the mode register directly. */
7662 for (i = 0; i < MAX_WAIT_CNT; i++) {
7664 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7667 if (i >= MAX_WAIT_CNT) {
7668 dev_err(&tp->pdev->dev,
7669 "%s timed out, TX_MODE_ENABLE will not clear "
7670 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7674 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7675 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7676 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7678 tw32(FTQ_RESET, 0xffffffff);
7679 tw32(FTQ_RESET, 0x00000000);
7681 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7682 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7684 for (i = 0; i < tp->irq_cnt; i++) {
7685 struct tg3_napi *tnapi = &tp->napi[i];
7686 if (tnapi->hw_status)
7687 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7693 /* Save PCI command register before chip reset */
/* The core-clock reset clears PCI_COMMAND on some chips; stash it so
 * tg3_restore_pci_state() can put it back afterwards. */
7694 static void tg3_save_pci_state(struct tg3 *tp)
7696 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7699 /* Restore PCI state after chip reset */
/* Reprograms everything the core-clock reset may have clobbered:
 * indirect-access enable, PCISTATE retry/APE-access bits, the saved
 * PCI_COMMAND word, cacheline/latency (non-PCIe), PCI-X relaxed
 * ordering, and the MSI enable bit on 5780-class chips.
 */
7700 static void tg3_restore_pci_state(struct tg3 *tp)
7704 /* Re-enable indirect register accesses. */
7705 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7706 tp->misc_host_ctrl);
7708 /* Set MAX PCI retry to zero. */
7709 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7710 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7711 tg3_flag(tp, PCIX_MODE))
7712 val |= PCISTATE_RETRY_SAME_DMA;
7713 /* Allow reads and writes to the APE register and memory space. */
7714 if (tg3_flag(tp, ENABLE_APE))
7715 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7716 PCISTATE_ALLOW_APE_SHMEM_WR |
7717 PCISTATE_ALLOW_APE_PSPACE_WR;
7718 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7720 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7722 if (!tg3_flag(tp, PCI_EXPRESS)) {
7723 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7724 tp->pci_cacheline_sz);
7725 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7729 /* Make sure PCI-X relaxed ordering bit is clear. */
7730 if (tg3_flag(tp, PCIX_MODE)) {
7733 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7735 pcix_cmd &= ~PCI_X_CMD_ERO;
7736 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7740 if (tg3_flag(tp, 5780_CLASS)) {
7742 /* Chip reset on 5780 will reset MSI enable bit,
7743 * so need to restore it.
7745 if (tg3_flag(tp, USING_MSI)) {
7748 pci_read_config_word(tp->pdev,
7749 tp->msi_cap + PCI_MSI_FLAGS,
7751 pci_write_config_word(tp->pdev,
7752 tp->msi_cap + PCI_MSI_FLAGS,
7753 ctrl | PCI_MSI_FLAGS_ENABLE);
7754 val = tr32(MSGINT_MODE);
7755 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7760 /* tp->lock is held. */
/* Full GRC core-clock chip reset.  Saves PCI state, quiesces the irq
 * path, issues GRC_MISC_CFG_CORECLK_RESET (with per-chip quirks for
 * PCIe, 5906 VCPU, and GPHY power), restores PCI/PCIe config state,
 * re-initializes MAC/memory-arbiter/GRC modes, waits for firmware,
 * and re-probes the ASF enable state from NVRAM shadow memory.
 */
7761 static int tg3_chip_reset(struct tg3 *tp)
7764 void (*write_op)(struct tg3 *, u32, u32);
7769 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7771 /* No matching tg3_nvram_unlock() after this because
7772 * chip reset below will undo the nvram lock.
7774 tp->nvram_lock_cnt = 0;
7776 /* GRC_MISC_CFG core clock reset will clear the memory
7777 * enable bit in PCI register 4 and the MSI enable bit
7778 * on some chips, so we save relevant registers here.
7780 tg3_save_pci_state(tp);
7782 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7783 tg3_flag(tp, 5755_PLUS))
7784 tw32(GRC_FASTBOOT_PC, 0);
7787 * We must avoid the readl() that normally takes place.
7788 * It locks machines, causes machine checks, and other
7789 * fun things. So, temporarily disable the 5701
7790 * hardware workaround, while we do the reset.
7792 write_op = tp->write32;
7793 if (write_op == tg3_write_flush_reg32)
7794 tp->write32 = tg3_write32;
7796 /* Prevent the irq handler from reading or writing PCI registers
7797 * during chip reset when the memory enable bit in the PCI command
7798 * register may be cleared. The chip does not generate interrupt
7799 * at this time, but the irq handler may still be called due to irq
7800 * sharing or irqpoll.
7802 tg3_flag_set(tp, CHIP_RESETTING);
7803 for (i = 0; i < tp->irq_cnt; i++) {
7804 struct tg3_napi *tnapi = &tp->napi[i];
7805 if (tnapi->hw_status) {
7806 tnapi->hw_status->status = 0;
7807 tnapi->hw_status->status_tag = 0;
7809 tnapi->last_tag = 0;
7810 tnapi->last_irq_tag = 0;
/* Make sure no in-flight handler is still touching the device. */
7814 for (i = 0; i < tp->irq_cnt; i++)
7815 synchronize_irq(tp->napi[i].irq_vec);
7817 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7818 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7819 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7823 val = GRC_MISC_CFG_CORECLK_RESET;
7825 if (tg3_flag(tp, PCI_EXPRESS)) {
7826 /* Force PCIe 1.0a mode */
7827 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7828 !tg3_flag(tp, 57765_PLUS) &&
7829 tr32(TG3_PCIE_PHY_TSTCTL) ==
7830 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7831 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7833 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7834 tw32(GRC_MISC_CFG, (1 << 29));
7839 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7840 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7841 tw32(GRC_VCPU_EXT_CTRL,
7842 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7845 /* Manage gphy power for all CPMU absent PCIe devices. */
7846 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7847 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
/* This write performs the actual chip reset. */
7849 tw32(GRC_MISC_CFG, val)
7851 /* restore 5701 hardware bug workaround write method */
7852 tp->write32 = write_op;
7854 /* Unfortunately, we have to delay before the PCI read back.
7855 * Some 575X chips even will not respond to a PCI cfg access
7856 * when the reset command is given to the chip.
7858 * How do these hardware designers expect things to work
7859 * properly if the PCI write is posted for a long period
7860 * of time? It is always necessary to have some method by
7861 * which a register read back can occur to push the write
7862 * out which does the reset.
7864 * For most tg3 variants the trick below was working.
7869 /* Flush PCI posted writes. The normal MMIO registers
7870 * are inaccessible at this time so this is the only
7871 * way to make this reliably (actually, this is no longer
7872 * the case, see above). I tried to use indirect
7873 * register read/write but this upset some 5701 variants.
7875 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7879 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7882 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7886 /* Wait for link training to complete. */
7887 for (i = 0; i < 5000; i++)
7890 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7891 pci_write_config_dword(tp->pdev, 0xc4,
7892 cfg_val | (1 << 15));
7895 /* Clear the "no snoop" and "relaxed ordering" bits. */
7896 pci_read_config_word(tp->pdev,
7897 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7899 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7900 PCI_EXP_DEVCTL_NOSNOOP_EN);
7902 * Older PCIe devices only support the 128 byte
7903 * MPS setting. Enforce the restriction.
7905 if (!tg3_flag(tp, CPMU_PRESENT))
7906 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7907 pci_write_config_word(tp->pdev,
7908 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7911 /* Clear error status */
7912 pci_write_config_word(tp->pdev,
7913 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7914 PCI_EXP_DEVSTA_CED |
7915 PCI_EXP_DEVSTA_NFED |
7916 PCI_EXP_DEVSTA_FED |
7917 PCI_EXP_DEVSTA_URD);
7920 tg3_restore_pci_state(tp);
7922 tg3_flag_clear(tp, CHIP_RESETTING);
7923 tg3_flag_clear(tp, ERROR_PROCESSED);
7926 if (tg3_flag(tp, 5780_CLASS))
7927 val = tr32(MEMARB_MODE);
7928 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7930 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7932 tw32(0x5000, 0x400);
7935 tw32(GRC_MODE, tp->grc_mode);
7937 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7940 tw32(0xc4, val | (1 << 15));
7943 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7944 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7945 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7946 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7947 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7948 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7951 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7952 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7954 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7955 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7960 tw32_f(MAC_MODE, val);
7963 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
/* Wait for boot/ASF firmware to finish its post-reset init. */
7965 err = tg3_poll_fw(tp);
7971 if (tg3_flag(tp, PCI_EXPRESS) &&
7972 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7973 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7974 !tg3_flag(tp, 57765_PLUS)) {
7977 tw32(0x7c00, val | (1 << 25));
7980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7981 val = tr32(TG3_CPMU_CLCK_ORIDE);
7982 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7985 /* Reprobe ASF enable state. */
7986 tg3_flag_clear(tp, ENABLE_ASF);
7987 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7988 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7989 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7992 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7993 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7994 tg3_flag_set(tp, ENABLE_ASF);
7995 tp->last_event_jiffies = jiffies;
7996 if (tg3_flag(tp, 5750_PLUS))
7997 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8004 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8005 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8007 /* tp->lock is held. */
/*
 * tg3_halt - quiesce the hardware and put the chip through a full reset.
 * @tp: driver private state
 * @kind: reset kind, forwarded to the firmware signature helpers
 * @silent: forwarded to tg3_abort_hw() to suppress diagnostics
 *
 * Writes the pre-reset firmware signature, aborts in-flight hardware
 * activity, resets the chip, reprograms the MAC address, then writes
 * the legacy and post-reset signatures.  Statistics are snapshotted
 * into net_stats_prev/estats_prev and the hardware stats block is
 * zeroed so the next periodic sample starts from fresh data.
 *
 * NOTE(review): listing is elided here; the error handling after
 * tg3_chip_reset() and the final return are not visible in this view.
 */
8008 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8014 tg3_write_sig_pre_reset(tp, kind);
8016 tg3_abort_hw(tp, silent);
8017 err = tg3_chip_reset(tp);
8019 __tg3_set_mac_addr(tp, 0);
8021 tg3_write_sig_legacy(tp, kind);
8022 tg3_write_sig_post_reset(tp, kind);
8025 /* Save the stats across chip resets... */
8026 tg3_get_nstats(tp, &tp->net_stats_prev);
8027 tg3_get_estats(tp, &tp->estats_prev);
8029 /* And make sure the next sample is new data */
8030 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
/*
 * tg3_set_mac_addr - ndo_set_mac_address handler.
 * @dev: net device
 * @p: struct sockaddr holding the new MAC address
 *
 * Validates the address, copies it into dev->dev_addr, and (if the
 * interface is running) programs it into the MAC under tp->lock.
 * Returns 0 on success or -EADDRNOTAVAIL for an invalid address.
 *
 * NOTE(review): listing is elided; the early-return path when the
 * device is not running is not fully visible here.
 */
8039 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8041 struct tg3 *tp = netdev_priv(dev);
8042 struct sockaddr *addr = p;
8043 int err = 0, skip_mac_1 = 0;
8045 if (!is_valid_ether_addr(addr->sa_data))
8046 return -EADDRNOTAVAIL;
8048 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8050 if (!netif_running(dev))
/*
 * When ASF firmware is active it may own MAC address slot 1; compare
 * the two hardware slots so we do not clobber the one ASF is using.
 */
8053 if (tg3_flag(tp, ENABLE_ASF)) {
8054 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8056 addr0_high = tr32(MAC_ADDR_0_HIGH);
8057 addr0_low = tr32(MAC_ADDR_0_LOW);
8058 addr1_high = tr32(MAC_ADDR_1_HIGH);
8059 addr1_low = tr32(MAC_ADDR_1_LOW);
8061 /* Skip MAC addr 1 if ASF is using it. */
8062 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8063 !(addr1_high == 0 && addr1_low == 0))
/* Program the new address into the MAC under the device lock. */
8066 spin_lock_bh(&tp->lock);
8067 __tg3_set_mac_addr(tp, skip_mac_1);
8068 spin_unlock_bh(&tp->lock);
8073 /* tp->lock is held. */
/*
 * tg3_set_bdinfo - program one TG3_BDINFO block in NIC SRAM.
 * @tp: driver private state
 * @bdinfo_addr: SRAM base address of the BDINFO structure
 * @mapping: host DMA address of the ring (split into high/low 32 bits)
 * @maxlen_flags: (max buffer length << 16) | ring attribute flags
 *
 * Writes the 64-bit host ring address, the maxlen/flags word, and (on
 * pre-5705 chips only) the NIC-side descriptor address.
 *
 * NOTE(review): the tg3_write_mem()/tw32 call lines themselves are
 * elided from this listing; only their argument lines are visible.
 */
8074 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8075 dma_addr_t mapping, u32 maxlen_flags,
8079 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8080 ((u64) mapping >> 32));
8082 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8083 ((u64) mapping & 0xffffffff));
8085 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8088 if (!tg3_flag(tp, 5705_PLUS))
8090 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
/*
 * __tg3_set_coalesce - program the host coalescing engine from ethtool
 * parameters.
 * @tp: driver private state
 * @ec: ethtool coalescing parameters to apply
 *
 * Writes the TX/RX coalescing tick and max-frame registers for the
 * default vector (zeroed instead when TSS/RSS steer that direction to
 * per-vector registers), then programs the per-vector register sets
 * spaced 0x18 apart, and finally zeroes the registers of any unused
 * vectors up to irq_max.
 */
8094 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8098 if (!tg3_flag(tp, ENABLE_TSS)) {
8099 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8100 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8101 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8103 tw32(HOSTCC_TXCOL_TICKS, 0);
8104 tw32(HOSTCC_TXMAX_FRAMES, 0);
8105 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8108 if (!tg3_flag(tp, ENABLE_RSS)) {
8109 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8110 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8111 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8113 tw32(HOSTCC_RXCOL_TICKS, 0);
8114 tw32(HOSTCC_RXMAX_FRAMES, 0);
8115 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
/* Stats-block coalescing only exists on pre-5705 hardware. */
8118 if (!tg3_flag(tp, 5705_PLUS)) {
8119 u32 val = ec->stats_block_coalesce_usecs;
8121 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8122 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8124 if (!netif_carrier_ok(tp->dev))
8127 tw32(HOSTCC_STAT_COAL_TICKS, val);
/* Per-vector registers: each vector's set is 0x18 bytes apart. */
8130 for (i = 0; i < tp->irq_cnt - 1; i++) {
8133 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8134 tw32(reg, ec->rx_coalesce_usecs);
8135 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8136 tw32(reg, ec->rx_max_coalesced_frames);
8137 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8138 tw32(reg, ec->rx_max_coalesced_frames_irq);
8140 if (tg3_flag(tp, ENABLE_TSS)) {
8141 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8142 tw32(reg, ec->tx_coalesce_usecs);
8143 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8144 tw32(reg, ec->tx_max_coalesced_frames);
8145 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8146 tw32(reg, ec->tx_max_coalesced_frames_irq);
/* Zero out the registers of vectors beyond the active count. */
8150 for (; i < tp->irq_max - 1; i++) {
8151 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8152 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8153 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8155 if (tg3_flag(tp, ENABLE_TSS)) {
8156 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8157 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8158 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8163 /* tp->lock is held. */
/*
 * tg3_rings_reset - disable all secondary rings and reprogram ring
 * control blocks for the active interrupt vectors.
 * @tp: driver private state
 *
 * Disables every transmit and receive-return ring control block beyond
 * the first (the count of RCBs varies by ASIC generation), quiets the
 * interrupt mailbox, zeroes per-vector mailbox registers and bookkeeping
 * counters, clears the status blocks in host memory, and programs the
 * status-block DMA addresses plus the TX/RX-return BDINFO blocks for
 * vector 0 and each additional MSI-X vector.
 *
 * NOTE(review): listing is elided; some closing braces and a few
 * interior statements are not visible in this view.
 */
8164 static void tg3_rings_reset(struct tg3 *tp)
8167 u32 stblk, txrcb, rxrcb, limit;
8168 struct tg3_napi *tnapi = &tp->napi[0];
8170 /* Disable all transmit rings but the first. */
8171 if (!tg3_flag(tp, 5705_PLUS))
8172 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8173 else if (tg3_flag(tp, 5717_PLUS))
8174 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8175 else if (tg3_flag(tp, 57765_CLASS))
8176 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8178 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8180 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8181 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8182 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8183 BDINFO_FLAGS_DISABLED);
8186 /* Disable all receive return rings but the first. */
8187 if (tg3_flag(tp, 5717_PLUS))
8188 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8189 else if (!tg3_flag(tp, 5705_PLUS))
8190 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8191 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8192 tg3_flag(tp, 57765_CLASS))
8193 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8195 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8197 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8198 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8199 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8200 BDINFO_FLAGS_DISABLED);
8202 /* Disable interrupts */
8203 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8204 tp->napi[0].chk_msi_cnt = 0;
8205 tp->napi[0].last_rx_cons = 0;
8206 tp->napi[0].last_tx_cons = 0;
8208 /* Zero mailbox registers. */
8209 if (tg3_flag(tp, SUPPORT_MSIX)) {
8210 for (i = 1; i < tp->irq_max; i++) {
8211 tp->napi[i].tx_prod = 0;
8212 tp->napi[i].tx_cons = 0;
8213 if (tg3_flag(tp, ENABLE_TSS))
8214 tw32_mailbox(tp->napi[i].prodmbox, 0);
8215 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8216 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8217 tp->napi[i].chk_msi_cnt = 0;
8218 tp->napi[i].last_rx_cons = 0;
8219 tp->napi[i].last_tx_cons = 0;
8221 if (!tg3_flag(tp, ENABLE_TSS))
8222 tw32_mailbox(tp->napi[0].prodmbox, 0);
8224 tp->napi[0].tx_prod = 0;
8225 tp->napi[0].tx_cons = 0;
8226 tw32_mailbox(tp->napi[0].prodmbox, 0);
8227 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8230 /* Make sure the NIC-based send BD rings are disabled. */
8231 if (!tg3_flag(tp, 5705_PLUS)) {
8232 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8233 for (i = 0; i < 16; i++)
8234 tw32_tx_mbox(mbox + i * 8, 0);
8237 txrcb = NIC_SRAM_SEND_RCB;
8238 rxrcb = NIC_SRAM_RCV_RET_RCB;
8240 /* Clear status block in ram. */
8241 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8243 /* Set status block DMA address */
8244 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8245 ((u64) tnapi->status_mapping >> 32));
8246 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8247 ((u64) tnapi->status_mapping & 0xffffffff));
8249 if (tnapi->tx_ring) {
8250 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8251 (TG3_TX_RING_SIZE <<
8252 BDINFO_FLAGS_MAXLEN_SHIFT),
8253 NIC_SRAM_TX_BUFFER_DESC);
8254 txrcb += TG3_BDINFO_SIZE;
8257 if (tnapi->rx_rcb) {
8258 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8259 (tp->rx_ret_ring_mask + 1) <<
8260 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8261 rxrcb += TG3_BDINFO_SIZE;
/* Program status blocks and BDINFO for the remaining vectors. */
8264 stblk = HOSTCC_STATBLCK_RING1;
8266 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8267 u64 mapping = (u64)tnapi->status_mapping;
8268 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8269 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8271 /* Clear status block in ram. */
8272 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8274 if (tnapi->tx_ring) {
8275 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8276 (TG3_TX_RING_SIZE <<
8277 BDINFO_FLAGS_MAXLEN_SHIFT),
8278 NIC_SRAM_TX_BUFFER_DESC);
8279 txrcb += TG3_BDINFO_SIZE;
8282 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8283 ((tp->rx_ret_ring_mask + 1) <<
8284 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8287 rxrcb += TG3_BDINFO_SIZE;
/*
 * tg3_setup_rxbd_thresholds - set RX buffer-descriptor replenish
 * thresholds for the standard and (if enabled) jumbo rings.
 * @tp: driver private state
 *
 * Picks the on-chip BD cache size for this ASIC generation, then
 * programs RCVBDI_*_THRESH with the smaller of the NIC-side limit
 * (half the cache, capped by rx_std_max_post) and the host-side limit
 * (an eighth of the configured ring depth, minimum 1).  On 57765+
 * parts the replenish low-water marks are also written.
 */
8291 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8293 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8295 if (!tg3_flag(tp, 5750_PLUS) ||
8296 tg3_flag(tp, 5780_CLASS) ||
8297 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8298 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8299 tg3_flag(tp, 57765_PLUS))
8300 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8301 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8302 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8303 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8305 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8307 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8308 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8310 val = min(nic_rep_thresh, host_rep_thresh);
8311 tw32(RCVBDI_STD_THRESH, val);
8313 if (tg3_flag(tp, 57765_PLUS))
8314 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
/* No jumbo ring on non-jumbo-capable or 5780-class devices. */
8316 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8319 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8321 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8323 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8324 tw32(RCVBDI_JUMBO_THRESH, val);
8326 if (tg3_flag(tp, 57765_PLUS))
8327 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
/*
 * calc_crc - compute a CRC over @len bytes of @buf, one byte (outer
 * loop) and one bit (inner loop) at a time.
 * NOTE(review): the body is almost entirely elided from this listing;
 * presumably this is the Ethernet CRC-32 used for the multicast hash
 * filter (see __tg3_set_rx_mode) — confirm against the full source.
 */
8330 static inline u32 calc_crc(unsigned char *buf, int len)
8338 for (j = 0; j < len; j++) {
8341 for (k = 0; k < 8; k++) {
8354 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8356 /* accept or reject all multicast frames */
8357 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8358 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8359 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8360 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/*
 * __tg3_set_rx_mode - apply the net device's RX filtering policy.
 * @dev: net device whose flags and multicast list are consulted
 *
 * Builds a new MAC_RX_MODE value (promiscuous / VLAN-tag-keep bits),
 * then configures multicast filtering: accept-all, reject-all, or a
 * 128-bit hash filter computed from the CRC of each multicast address.
 * MAC_RX_MODE is only rewritten if the computed mode changed.
 * Caller is expected to hold the appropriate lock (see callers).
 */
8363 static void __tg3_set_rx_mode(struct net_device *dev)
8365 struct tg3 *tp = netdev_priv(dev);
8368 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8369 RX_MODE_KEEP_VLAN_TAG);
8371 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8372 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8375 if (!tg3_flag(tp, ENABLE_ASF))
8376 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8379 if (dev->flags & IFF_PROMISC) {
8380 /* Promiscuous mode. */
8381 rx_mode |= RX_MODE_PROMISC;
8382 } else if (dev->flags & IFF_ALLMULTI) {
8383 /* Accept all multicast. */
8384 tg3_set_multi(tp, 1);
8385 } else if (netdev_mc_empty(dev)) {
8386 /* Reject all multicast. */
8387 tg3_set_multi(tp, 0);
8389 /* Accept one or more multicast(s). */
8390 struct netdev_hw_addr *ha;
8391 u32 mc_filter[4] = { 0, };
/*
 * Hash each multicast address via CRC; the top bits select one of
 * the four 32-bit hash registers, the rest select the bit within it.
 */
8396 netdev_for_each_mc_addr(ha, dev) {
8397 crc = calc_crc(ha->addr, ETH_ALEN);
8399 regidx = (bit & 0x60) >> 5;
8401 mc_filter[regidx] |= (1 << bit);
8404 tw32(MAC_HASH_REG_0, mc_filter[0]);
8405 tw32(MAC_HASH_REG_1, mc_filter[1]);
8406 tw32(MAC_HASH_REG_2, mc_filter[2]);
8407 tw32(MAC_HASH_REG_3, mc_filter[3]);
/* Only touch the hardware register if the mode actually changed. */
8410 if (rx_mode != tp->rx_mode) {
8411 tp->rx_mode = rx_mode;
8412 tw32_f(MAC_RX_MODE, rx_mode);
8417 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8421 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8422 tp->rss_ind_tbl[i] =
8423 ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
/*
 * tg3_rss_check_indir_tbl - sanity-check the RSS indirection table
 * against the current interrupt-vector configuration.
 * @tp: driver private state
 *
 * Without MSI-X there is nothing to check.  With two or fewer vectors
 * there is at most one RX queue, so the table is simply zeroed.
 * Otherwise any entry referencing a queue >= irq_cnt - 1 is stale and
 * the whole table is re-seeded with the defaults.
 */
8426 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8430 if (!tg3_flag(tp, SUPPORT_MSIX))
8433 if (tp->irq_cnt <= 2) {
8434 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8438 /* Validate table against current IRQ count */
8439 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8440 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
/* Loop exited early => at least one invalid entry was found. */
8444 if (i != TG3_RSS_INDIR_TBL_SIZE)
8445 tg3_rss_init_dflt_indir_tbl(tp);
/*
 * tg3_rss_write_indir_tbl - write the software RSS indirection table
 * into the MAC_RSS_INDIR_TBL_0 register block.
 * @tp: driver private state
 *
 * Entries are packed eight at a time into each 32-bit register write.
 * NOTE(review): listing is elided; the shift applied between entries
 * and the tw32() of the packed word are not visible in this view.
 */
8448 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8451 u32 reg = MAC_RSS_INDIR_TBL_0;
8453 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8454 u32 val = tp->rss_ind_tbl[i];
8456 for (; i % 8; i++) {
8458 val |= tp->rss_ind_tbl[i];
8465 /* tp->lock is held. */
8466 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8468 u32 val, rdmac_mode;
8470 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8472 tg3_disable_ints(tp);
8476 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8478 if (tg3_flag(tp, INIT_COMPLETE))
8479 tg3_abort_hw(tp, 1);
8481 /* Enable MAC control of LPI */
8482 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8483 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8484 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8485 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8487 tw32_f(TG3_CPMU_EEE_CTRL,
8488 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8490 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8491 TG3_CPMU_EEEMD_LPI_IN_TX |
8492 TG3_CPMU_EEEMD_LPI_IN_RX |
8493 TG3_CPMU_EEEMD_EEE_ENABLE;
8495 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8496 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8498 if (tg3_flag(tp, ENABLE_APE))
8499 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8501 tw32_f(TG3_CPMU_EEE_MODE, val);
8503 tw32_f(TG3_CPMU_EEE_DBTMR1,
8504 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8505 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8507 tw32_f(TG3_CPMU_EEE_DBTMR2,
8508 TG3_CPMU_DBTMR2_APE_TX_2047US |
8509 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8515 err = tg3_chip_reset(tp);
8519 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8521 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8522 val = tr32(TG3_CPMU_CTRL);
8523 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8524 tw32(TG3_CPMU_CTRL, val);
8526 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8527 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8528 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8529 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8531 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8532 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8533 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8534 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8536 val = tr32(TG3_CPMU_HST_ACC);
8537 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8538 val |= CPMU_HST_ACC_MACCLK_6_25;
8539 tw32(TG3_CPMU_HST_ACC, val);
8542 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8543 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8544 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8545 PCIE_PWR_MGMT_L1_THRESH_4MS;
8546 tw32(PCIE_PWR_MGMT_THRESH, val);
8548 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8549 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8551 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8553 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8554 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8557 if (tg3_flag(tp, L1PLLPD_EN)) {
8558 u32 grc_mode = tr32(GRC_MODE);
8560 /* Access the lower 1K of PL PCIE block registers. */
8561 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8562 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8564 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8565 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8566 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8568 tw32(GRC_MODE, grc_mode);
8571 if (tg3_flag(tp, 57765_CLASS)) {
8572 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8573 u32 grc_mode = tr32(GRC_MODE);
8575 /* Access the lower 1K of PL PCIE block registers. */
8576 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8577 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8579 val = tr32(TG3_PCIE_TLDLPL_PORT +
8580 TG3_PCIE_PL_LO_PHYCTL5);
8581 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8582 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8584 tw32(GRC_MODE, grc_mode);
8587 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8588 u32 grc_mode = tr32(GRC_MODE);
8590 /* Access the lower 1K of DL PCIE block registers. */
8591 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8592 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8594 val = tr32(TG3_PCIE_TLDLPL_PORT +
8595 TG3_PCIE_DL_LO_FTSMAX);
8596 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8597 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8598 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8600 tw32(GRC_MODE, grc_mode);
8603 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8604 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8605 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8606 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8609 /* This works around an issue with Athlon chipsets on
8610 * B3 tigon3 silicon. This bit has no effect on any
8611 * other revision. But do not set this on PCI Express
8612 * chips and don't even touch the clocks if the CPMU is present.
8614 if (!tg3_flag(tp, CPMU_PRESENT)) {
8615 if (!tg3_flag(tp, PCI_EXPRESS))
8616 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8617 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8620 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8621 tg3_flag(tp, PCIX_MODE)) {
8622 val = tr32(TG3PCI_PCISTATE);
8623 val |= PCISTATE_RETRY_SAME_DMA;
8624 tw32(TG3PCI_PCISTATE, val);
8627 if (tg3_flag(tp, ENABLE_APE)) {
8628 /* Allow reads and writes to the
8629 * APE register and memory space.
8631 val = tr32(TG3PCI_PCISTATE);
8632 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8633 PCISTATE_ALLOW_APE_SHMEM_WR |
8634 PCISTATE_ALLOW_APE_PSPACE_WR;
8635 tw32(TG3PCI_PCISTATE, val);
8638 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8639 /* Enable some hw fixes. */
8640 val = tr32(TG3PCI_MSI_DATA);
8641 val |= (1 << 26) | (1 << 28) | (1 << 29);
8642 tw32(TG3PCI_MSI_DATA, val);
8645 /* Descriptor ring init may make accesses to the
8646 * NIC SRAM area to setup the TX descriptors, so we
8647 * can only do this after the hardware has been
8648 * successfully reset.
8650 err = tg3_init_rings(tp);
8654 if (tg3_flag(tp, 57765_PLUS)) {
8655 val = tr32(TG3PCI_DMA_RW_CTRL) &
8656 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8657 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8658 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8659 if (!tg3_flag(tp, 57765_CLASS) &&
8660 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8661 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8662 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8663 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8664 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8665 /* This value is determined during the probe time DMA
8666 * engine test, tg3_test_dma.
8668 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8671 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8672 GRC_MODE_4X_NIC_SEND_RINGS |
8673 GRC_MODE_NO_TX_PHDR_CSUM |
8674 GRC_MODE_NO_RX_PHDR_CSUM);
8675 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8677 /* Pseudo-header checksum is done by hardware logic and not
8678 * the offload processers, so make the chip do the pseudo-
8679 * header checksums on receive. For transmit it is more
8680 * convenient to do the pseudo-header checksum in software
8681 * as Linux does that on transmit for us in all cases.
8683 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8687 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8689 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8690 val = tr32(GRC_MISC_CFG);
8692 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8693 tw32(GRC_MISC_CFG, val);
8695 /* Initialize MBUF/DESC pool. */
8696 if (tg3_flag(tp, 5750_PLUS)) {
8698 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8699 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8700 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8701 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8703 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8704 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8705 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8706 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8709 fw_len = tp->fw_len;
8710 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8711 tw32(BUFMGR_MB_POOL_ADDR,
8712 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8713 tw32(BUFMGR_MB_POOL_SIZE,
8714 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8717 if (tp->dev->mtu <= ETH_DATA_LEN) {
8718 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8719 tp->bufmgr_config.mbuf_read_dma_low_water);
8720 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8721 tp->bufmgr_config.mbuf_mac_rx_low_water);
8722 tw32(BUFMGR_MB_HIGH_WATER,
8723 tp->bufmgr_config.mbuf_high_water);
8725 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8726 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8727 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8728 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8729 tw32(BUFMGR_MB_HIGH_WATER,
8730 tp->bufmgr_config.mbuf_high_water_jumbo);
8732 tw32(BUFMGR_DMA_LOW_WATER,
8733 tp->bufmgr_config.dma_low_water);
8734 tw32(BUFMGR_DMA_HIGH_WATER,
8735 tp->bufmgr_config.dma_high_water);
8737 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8738 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8739 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8740 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8741 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8742 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8743 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8744 tw32(BUFMGR_MODE, val);
8745 for (i = 0; i < 2000; i++) {
8746 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8751 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8755 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8756 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8758 tg3_setup_rxbd_thresholds(tp);
8760 /* Initialize TG3_BDINFO's at:
8761 * RCVDBDI_STD_BD: standard eth size rx ring
8762 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8763 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8766 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8767 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8768 * ring attribute flags
8769 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8771 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8772 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8774 * The size of each ring is fixed in the firmware, but the location is
8777 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8778 ((u64) tpr->rx_std_mapping >> 32));
8779 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8780 ((u64) tpr->rx_std_mapping & 0xffffffff));
8781 if (!tg3_flag(tp, 5717_PLUS))
8782 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8783 NIC_SRAM_RX_BUFFER_DESC);
8785 /* Disable the mini ring */
8786 if (!tg3_flag(tp, 5705_PLUS))
8787 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8788 BDINFO_FLAGS_DISABLED);
8790 /* Program the jumbo buffer descriptor ring control
8791 * blocks on those devices that have them.
8793 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8794 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8796 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8797 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8798 ((u64) tpr->rx_jmb_mapping >> 32));
8799 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8800 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8801 val = TG3_RX_JMB_RING_SIZE(tp) <<
8802 BDINFO_FLAGS_MAXLEN_SHIFT;
8803 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8804 val | BDINFO_FLAGS_USE_EXT_RECV);
8805 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8806 tg3_flag(tp, 57765_CLASS))
8807 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8808 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8810 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8811 BDINFO_FLAGS_DISABLED);
8814 if (tg3_flag(tp, 57765_PLUS)) {
8815 val = TG3_RX_STD_RING_SIZE(tp);
8816 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8817 val |= (TG3_RX_STD_DMA_SZ << 2);
8819 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8821 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8823 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8825 tpr->rx_std_prod_idx = tp->rx_pending;
8826 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8828 tpr->rx_jmb_prod_idx =
8829 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8830 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8832 tg3_rings_reset(tp);
8834 /* Initialize MAC address and backoff seed. */
8835 __tg3_set_mac_addr(tp, 0);
8837 /* MTU + ethernet header + FCS + optional VLAN tag */
8838 tw32(MAC_RX_MTU_SIZE,
8839 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8841 /* The slot time is changed by tg3_setup_phy if we
8842 * run at gigabit with half duplex.
8844 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8845 (6 << TX_LENGTHS_IPG_SHIFT) |
8846 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8848 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8849 val |= tr32(MAC_TX_LENGTHS) &
8850 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8851 TX_LENGTHS_CNT_DWN_VAL_MSK);
8853 tw32(MAC_TX_LENGTHS, val);
8855 /* Receive rules. */
8856 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8857 tw32(RCVLPC_CONFIG, 0x0181);
8859 /* Calculate RDMAC_MODE setting early, we need it to determine
8860 * the RCVLPC_STATE_ENABLE mask.
8862 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8863 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8864 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8865 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8866 RDMAC_MODE_LNGREAD_ENAB);
8868 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8869 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8871 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8872 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8873 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8874 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8875 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8876 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8878 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8879 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8880 if (tg3_flag(tp, TSO_CAPABLE) &&
8881 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8882 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8883 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8884 !tg3_flag(tp, IS_5788)) {
8885 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8889 if (tg3_flag(tp, PCI_EXPRESS))
8890 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8892 if (tg3_flag(tp, HW_TSO_1) ||
8893 tg3_flag(tp, HW_TSO_2) ||
8894 tg3_flag(tp, HW_TSO_3))
8895 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8897 if (tg3_flag(tp, 57765_PLUS) ||
8898 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8899 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8900 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8902 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8903 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8907 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8908 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8909 tg3_flag(tp, 57765_PLUS)) {
8910 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8911 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8912 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8913 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8914 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8915 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8916 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8917 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8918 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8920 tw32(TG3_RDMA_RSRVCTRL_REG,
8921 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8924 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8926 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8927 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8928 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8929 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8932 /* Receive/send statistics. */
8933 if (tg3_flag(tp, 5750_PLUS)) {
8934 val = tr32(RCVLPC_STATS_ENABLE);
8935 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8936 tw32(RCVLPC_STATS_ENABLE, val);
8937 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8938 tg3_flag(tp, TSO_CAPABLE)) {
8939 val = tr32(RCVLPC_STATS_ENABLE);
8940 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8941 tw32(RCVLPC_STATS_ENABLE, val);
8943 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8945 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8946 tw32(SNDDATAI_STATSENAB, 0xffffff);
8947 tw32(SNDDATAI_STATSCTRL,
8948 (SNDDATAI_SCTRL_ENABLE |
8949 SNDDATAI_SCTRL_FASTUPD));
8951 /* Setup host coalescing engine. */
8952 tw32(HOSTCC_MODE, 0);
8953 for (i = 0; i < 2000; i++) {
8954 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8959 __tg3_set_coalesce(tp, &tp->coal);
8961 if (!tg3_flag(tp, 5705_PLUS)) {
8962 /* Status/statistics block address. See tg3_timer,
8963 * the tg3_periodic_fetch_stats call there, and
8964 * tg3_get_stats to see how this works for 5705/5750 chips.
8966 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8967 ((u64) tp->stats_mapping >> 32));
8968 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8969 ((u64) tp->stats_mapping & 0xffffffff));
8970 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8972 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8974 /* Clear statistics and status block memory areas */
8975 for (i = NIC_SRAM_STATS_BLK;
8976 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8978 tg3_write_mem(tp, i, 0);
8983 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8985 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8986 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8987 if (!tg3_flag(tp, 5705_PLUS))
8988 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8990 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8991 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8992 /* reset to prevent losing 1st rx packet intermittently */
8993 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8997 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8998 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8999 MAC_MODE_FHDE_ENABLE;
9000 if (tg3_flag(tp, ENABLE_APE))
9001 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9002 if (!tg3_flag(tp, 5705_PLUS) &&
9003 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9004 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9005 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9006 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9009 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9010 * If TG3_FLAG_IS_NIC is zero, we should read the
9011 * register to preserve the GPIO settings for LOMs. The GPIOs,
9012 * whether used as inputs or outputs, are set by boot code after
9015 if (!tg3_flag(tp, IS_NIC)) {
9018 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9019 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9020 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9022 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9023 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9024 GRC_LCLCTRL_GPIO_OUTPUT3;
9026 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9027 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9029 tp->grc_local_ctrl &= ~gpio_mask;
9030 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9032 /* GPIO1 must be driven high for eeprom write protect */
9033 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9034 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9035 GRC_LCLCTRL_GPIO_OUTPUT1);
9037 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9040 if (tg3_flag(tp, USING_MSIX)) {
9041 val = tr32(MSGINT_MODE);
9042 val |= MSGINT_MODE_ENABLE;
9043 if (tp->irq_cnt > 1)
9044 val |= MSGINT_MODE_MULTIVEC_EN;
9045 if (!tg3_flag(tp, 1SHOT_MSI))
9046 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9047 tw32(MSGINT_MODE, val);
9050 if (!tg3_flag(tp, 5705_PLUS)) {
9051 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9055 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9056 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9057 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9058 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9059 WDMAC_MODE_LNGREAD_ENAB);
9061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9062 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9063 if (tg3_flag(tp, TSO_CAPABLE) &&
9064 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9065 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9067 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9068 !tg3_flag(tp, IS_5788)) {
9069 val |= WDMAC_MODE_RX_ACCEL;
9073 /* Enable host coalescing bug fix */
9074 if (tg3_flag(tp, 5755_PLUS))
9075 val |= WDMAC_MODE_STATUS_TAG_FIX;
9077 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9078 val |= WDMAC_MODE_BURST_ALL_DATA;
9080 tw32_f(WDMAC_MODE, val);
9083 if (tg3_flag(tp, PCIX_MODE)) {
9086 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9089 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9090 pcix_cmd |= PCI_X_CMD_READ_2K;
9091 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9092 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9093 pcix_cmd |= PCI_X_CMD_READ_2K;
9095 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9099 tw32_f(RDMAC_MODE, rdmac_mode);
9102 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9103 if (!tg3_flag(tp, 5705_PLUS))
9104 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9108 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9110 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9112 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9113 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9114 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9115 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9116 val |= RCVDBDI_MODE_LRG_RING_SZ;
9117 tw32(RCVDBDI_MODE, val);
9118 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9119 if (tg3_flag(tp, HW_TSO_1) ||
9120 tg3_flag(tp, HW_TSO_2) ||
9121 tg3_flag(tp, HW_TSO_3))
9122 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9123 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9124 if (tg3_flag(tp, ENABLE_TSS))
9125 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9126 tw32(SNDBDI_MODE, val);
9127 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9129 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9130 err = tg3_load_5701_a0_firmware_fix(tp);
9135 if (tg3_flag(tp, TSO_CAPABLE)) {
9136 err = tg3_load_tso_firmware(tp);
9141 tp->tx_mode = TX_MODE_ENABLE;
9143 if (tg3_flag(tp, 5755_PLUS) ||
9144 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9145 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9147 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9148 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9149 tp->tx_mode &= ~val;
9150 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9153 tw32_f(MAC_TX_MODE, tp->tx_mode);
9156 if (tg3_flag(tp, ENABLE_RSS)) {
9157 tg3_rss_write_indir_tbl(tp);
9159 /* Setup the "secret" hash key. */
9160 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9161 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9162 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9163 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9164 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9165 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9166 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9167 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9168 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9169 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9172 tp->rx_mode = RX_MODE_ENABLE;
9173 if (tg3_flag(tp, 5755_PLUS))
9174 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9176 if (tg3_flag(tp, ENABLE_RSS))
9177 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9178 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9179 RX_MODE_RSS_IPV6_HASH_EN |
9180 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9181 RX_MODE_RSS_IPV4_HASH_EN |
9182 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9184 tw32_f(MAC_RX_MODE, tp->rx_mode);
9187 tw32(MAC_LED_CTRL, tp->led_ctrl);
9189 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9190 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9191 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9194 tw32_f(MAC_RX_MODE, tp->rx_mode);
9197 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9198 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9199 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9200 /* Set drive transmission level to 1.2V */
9201 /* only if the signal pre-emphasis bit is not set */
9202 val = tr32(MAC_SERDES_CFG);
9205 tw32(MAC_SERDES_CFG, val);
9207 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9208 tw32(MAC_SERDES_CFG, 0x616000);
9211 /* Prevent chip from dropping frames when flow control
9214 if (tg3_flag(tp, 57765_CLASS))
9218 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9220 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9221 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9222 /* Use hardware link auto-negotiation */
9223 tg3_flag_set(tp, HW_AUTONEG);
9226 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9230 tmp = tr32(SERDES_RX_CTRL);
9231 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9232 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9233 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9234 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9237 if (!tg3_flag(tp, USE_PHYLIB)) {
9238 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9239 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9241 err = tg3_setup_phy(tp, 0);
9245 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9246 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9249 /* Clear CRC stats. */
9250 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9251 tg3_writephy(tp, MII_TG3_TEST1,
9252 tmp | MII_TG3_TEST1_CRC_EN);
9253 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9258 __tg3_set_rx_mode(tp->dev);
9260 /* Initialize receive rules. */
9261 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9262 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9263 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9264 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9266 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9270 if (tg3_flag(tp, ENABLE_ASF))
9274 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9276 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9278 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9280 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9282 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9284 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9286 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9288 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9290 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9292 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9294 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9296 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9298 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9300 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9308 if (tg3_flag(tp, ENABLE_APE))
9309 /* Write our heartbeat update interval to APE. */
9310 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9311 APE_HOST_HEARTBEAT_INT_DISABLE);
9313 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9318 /* Called at device open time to get the chip ready for
9319 * packet processing. Invoked with tp->lock held.
/* tg3_init_hw(): select the proper core clocks, reset the PCI memory-window
 * base register to 0, then perform the full hardware bring-up via
 * tg3_reset_hw(); returns tg3_reset_hw()'s status (0 on success).
 * NOTE(review): sampled excerpt -- braces/intermediate lines are missing
 * between the embedded original line numbers. */
9321 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9323 tg3_switch_clocks(tp);
9325 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9327 return tg3_reset_hw(tp, reset_phy);
/* TG3_STAT_ADD32(PSTAT, REG): read 32-bit hardware counter REG and add it
 * into the 64-bit (high/low pair) statistic PSTAT; if the low word wraps
 * (sum < addend, unsigned) carry one into the high word.
 * NOTE(review): the closing "} while (0)" line is outside this sampled view. */
9330 #define TG3_STAT_ADD32(PSTAT, REG) \
9331 do { u32 __val = tr32(REG); \
9332 (PSTAT)->low += __val; \
9333 if ((PSTAT)->low < __val) \
9334 (PSTAT)->high += 1; \
/* tg3_periodic_fetch_stats(): fold the chip's 32-bit MAC TX/RX statistics
 * registers into the 64-bit software counters in tp->hw_stats. Runs from
 * the periodic timer; does nothing when the link carrier is down.
 * NOTE(review): sampled excerpt -- some lines (e.g. the early return after
 * the carrier check, else-branch punctuation) are missing from this view. */
9337 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9339 struct tg3_hw_stats *sp = tp->hw_stats;
/* Counters are only meaningful/advancing while the link is up. */
9341 if (!netif_carrier_ok(tp->dev))
9344 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9345 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9346 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9347 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9348 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9349 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9350 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9351 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9352 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9353 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9354 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9355 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9356 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9358 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9359 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9360 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9361 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9362 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9363 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9364 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9365 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9366 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9367 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9368 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9369 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9370 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9371 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9373 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
/* 5717 / 5719-A0 / 5720-A0 parts take the alternate (HOSTCC_FLOW_ATTN)
 * path below instead of the RCVLPC discard counter. */
9374 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9375 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9376 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9377 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
/* Alternate path: treat the mbuf low-watermark attention bit as a
 * single discard event and clear (write-1) it afterwards. */
9379 u32 val = tr32(HOSTCC_FLOW_ATTN);
9380 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9382 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9383 sp->rx_discards.low += val;
9384 if (sp->rx_discards.low < val)
9385 sp->rx_discards.high += 1;
/* mbuf_lwm_thresh_hit mirrors rx_discards on this path. */
9387 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9389 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/* tg3_chk_missed_msi(): per-vector watchdog for lost MSI interrupts.
 * If a NAPI instance has pending work and its rx/tx consumer indices have
 * not moved since the last check, bump chk_msi_cnt (the action taken once
 * the count trips is on lines missing from this sampled view); otherwise
 * reset the counter and snapshot the current indices. */
9392 static void tg3_chk_missed_msi(struct tg3 *tp)
9396 for (i = 0; i < tp->irq_cnt; i++) {
9397 struct tg3_napi *tnapi = &tp->napi[i];
9399 if (tg3_has_work(tnapi)) {
/* Work pending but no progress since last tick: a candidate
 * for a missed interrupt. */
9400 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9401 tnapi->last_tx_cons == tnapi->tx_cons) {
9402 if (tnapi->chk_msi_cnt < 1) {
9403 tnapi->chk_msi_cnt++;
/* Progress was made: reset watchdog state. */
9409 tnapi->chk_msi_cnt = 0;
9410 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9411 tnapi->last_tx_cons = tnapi->tx_cons;
/* tg3_timer(): periodic driver timer (classic timer API, opaque arg is the
 * tg3 instance). Under tp->lock it: checks for missed MSIs on affected
 * chips, pokes the non-tagged-status interrupt workaround, fetches stats /
 * polls link state once per second, and sends the ASF firmware heartbeat.
 * Re-arms itself at the end. Bails out early while an irq sync or reset
 * task is in flight.
 * NOTE(review): sampled excerpt -- several lines (returns, else keywords,
 * some assignments) are missing between the embedded line numbers. */
9415 static void tg3_timer(unsigned long __opaque)
9417 struct tg3 *tp = (struct tg3 *) __opaque;
9419 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9422 spin_lock(&tp->lock);
/* These chips can drop MSIs; run the watchdog from tg3_chk_missed_msi(). */
9424 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9425 tg3_flag(tp, 57765_CLASS))
9426 tg3_chk_missed_msi(tp);
9428 if (!tg3_flag(tp, TAGGED_STATUS)) {
9429 /* All of this garbage is because when using non-tagged
9430 * IRQ status the mailbox/status_block protocol the chip
9431 * uses with the cpu is race prone.
9433 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9434 tw32(GRC_LOCAL_CTRL,
9435 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9437 tw32(HOSTCC_MODE, tp->coalesce_mode |
9438 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
/* Write DMA engine dead: schedule the reset task and bail out. */
9441 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9442 spin_unlock(&tp->lock);
9443 tg3_reset_task_schedule(tp);
9448 /* This part only runs once per second. */
9449 if (!--tp->timer_counter) {
9450 if (tg3_flag(tp, 5705_PLUS))
9451 tg3_periodic_fetch_stats(tp);
9453 if (tp->setlpicnt && !--tp->setlpicnt)
9454 tg3_phy_eee_enable(tp);
9456 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9460 mac_stat = tr32(MAC_STATUS);
9463 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9464 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9466 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9470 tg3_setup_phy(tp, 0);
9471 } else if (tg3_flag(tp, POLL_SERDES)) {
9472 u32 mac_stat = tr32(MAC_STATUS);
9475 if (netif_carrier_ok(tp->dev) &&
9476 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9479 if (!netif_carrier_ok(tp->dev) &&
9480 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9481 MAC_STATUS_SIGNAL_DET))) {
9485 if (!tp->serdes_counter) {
9488 ~MAC_MODE_PORT_MODE_MASK));
9490 tw32_f(MAC_MODE, tp->mac_mode);
9493 tg3_setup_phy(tp, 0);
9495 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9496 tg3_flag(tp, 5780_CLASS)) {
9497 tg3_serdes_parallel_detect(tp);
/* Re-arm the once-per-second divider. */
9500 tp->timer_counter = tp->timer_multiplier;
9503 /* Heartbeat is only sent once every 2 seconds.
9505 * The heartbeat is to tell the ASF firmware that the host
9506 * driver is still alive. In the event that the OS crashes,
9507 * ASF needs to reset the hardware to free up the FIFO space
9508 * that may be filled with rx packets destined for the host.
9509 * If the FIFO is full, ASF will no longer function properly.
9511 * Unintended resets have been reported on real time kernels
9512 * where the timer doesn't run on time. Netpoll will also have
9515 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9516 * to check the ring condition when the heartbeat is expiring
9517 * before doing the reset. This will prevent most unintended
9520 if (!--tp->asf_counter) {
9521 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9522 tg3_wait_for_event_ack(tp);
9524 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9525 FWCMD_NICDRV_ALIVE3);
9526 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9527 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9528 TG3_FW_UPDATE_TIMEOUT_SEC);
9530 tg3_generate_fw_event(tp);
9532 tp->asf_counter = tp->asf_multiplier;
9535 spin_unlock(&tp->lock);
/* Re-arm this timer for the next tick. */
9538 tp->timer.expires = jiffies + tp->timer_offset;
9539 add_timer(&tp->timer);
/* tg3_timer_init(): one-time (probe-path, __devinit) setup of the periodic
 * timer. Tagged-status chips that are not 5717/57765-class tick at 1 Hz
 * (HZ); all others at 10 Hz (HZ/10). Derives the once-per-second and ASF
 * heartbeat multipliers from the chosen period and initializes the timer
 * object with tg3_timer() as its handler. */
9542 static void __devinit tg3_timer_init(struct tg3 *tp)
9544 if (tg3_flag(tp, TAGGED_STATUS) &&
9545 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9546 !tg3_flag(tp, 57765_CLASS))
9547 tp->timer_offset = HZ;
9549 tp->timer_offset = HZ / 10;
9551 BUG_ON(tp->timer_offset > HZ);
/* timer_multiplier = ticks per second; asf_multiplier = ticks per
 * firmware-heartbeat interval. */
9553 tp->timer_multiplier = (HZ / tp->timer_offset);
9554 tp->asf_multiplier = (HZ / tp->timer_offset) *
9555 TG3_FW_UPDATE_FREQ_SEC;
9557 init_timer(&tp->timer);
9558 tp->timer.data = (unsigned long) tp;
9559 tp->timer.function = tg3_timer;
/* tg3_timer_start(): reload the per-second and ASF countdowns and arm the
 * periodic timer one timer_offset from now. */
9562 static void tg3_timer_start(struct tg3 *tp)
9564 tp->asf_counter = tp->asf_multiplier;
9565 tp->timer_counter = tp->timer_multiplier;
9567 tp->timer.expires = jiffies + tp->timer_offset;
9568 add_timer(&tp->timer);
/* tg3_timer_stop(): cancel the periodic timer, waiting for a concurrently
 * running handler to finish (del_timer_sync). */
9571 static void tg3_timer_stop(struct tg3 *tp)
9573 del_timer_sync(&tp->timer);
9576 /* Restart hardware after configuration changes, self-test, etc.
9577 * Invoked with tp->lock held.
/* tg3_restart_hw(): re-run tg3_init_hw(); on failure, halt the chip and
 * (per the sparse annotations) drop and re-take tp->lock around the
 * recovery path before re-enabling NAPI.
 * NOTE(review): sampled excerpt -- error-branch punctuation and the final
 * return are outside this view. */
9579 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9580 __releases(tp->lock)
9581 __acquires(tp->lock)
9585 err = tg3_init_hw(tp, reset_phy);
9588 "Failed to re-initialize device, aborting\n");
9589 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9590 tg3_full_unlock(tp);
9593 tg3_napi_enable(tp);
9595 tg3_full_lock(tp, 0);
/* tg3_reset_task(): deferred (workqueue) full chip reset. If the device is
 * no longer running, just clear RESET_TASK_PENDING. Otherwise: for a TX
 * recovery, fall back to flushed mailbox writes and set MBOX_WRITE_REORDER;
 * then halt, re-init the hardware, restart the net interface, and clear the
 * pending flag.
 * NOTE(review): sampled excerpt -- the netif stop/irq-sync steps between
 * the two lock sections are missing from this view. */
9600 static void tg3_reset_task(struct work_struct *work)
9602 struct tg3 *tp = container_of(work, struct tg3, reset_task);
9605 tg3_full_lock(tp, 0);
9607 if (!netif_running(tp->dev)) {
9608 tg3_flag_clear(tp, RESET_TASK_PENDING);
9609 tg3_full_unlock(tp);
9613 tg3_full_unlock(tp);
9619 tg3_full_lock(tp, 1);
/* TX hung: switch to the safe (flushing) mailbox write methods. */
9621 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9622 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9623 tp->write32_rx_mbox = tg3_write_flush_reg32;
9624 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9625 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9628 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9629 err = tg3_init_hw(tp, 1);
9633 tg3_netif_start(tp);
9636 tg3_full_unlock(tp);
9641 tg3_flag_clear(tp, RESET_TASK_PENDING);
/* tg3_request_irq(): register the interrupt handler for vector irq_num.
 * Single-vector devices use the netdev name; multi-vector ones build a
 * "<dev>-<n>" label in the per-NAPI buffer. Handler/flags selection: MSI
 * or MSI-X paths differ by 1SHOT_MSI (exact handlers are on lines missing
 * from this sampled view); legacy INTx uses tg3_interrupt_tagged when
 * TAGGED_STATUS is set and is registered IRQF_SHARED. Returns
 * request_irq()'s status. */
9644 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9647 unsigned long flags;
9649 struct tg3_napi *tnapi = &tp->napi[irq_num];
9651 if (tp->irq_cnt == 1)
9652 name = tp->dev->name;
9654 name = &tnapi->irq_lbl[0];
9655 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9656 name[IFNAMSIZ-1] = 0;
9659 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9661 if (tg3_flag(tp, 1SHOT_MSI))
9666 if (tg3_flag(tp, TAGGED_STATUS))
9667 fn = tg3_interrupt_tagged;
9668 flags = IRQF_SHARED;
9671 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
/* tg3_test_interrupt(): verify that the first interrupt vector actually
 * fires. Swaps in the tg3_test_isr handler, forces a coalescing "NOW"
 * event, and polls up to 5 times for either a non-zero interrupt mailbox
 * or the masked-PCI-interrupt bit. Restores the normal handler (and MSI
 * one-shot mode on 57765_PLUS) before returning; intr_ok records success.
 * NOTE(review): sampled excerpt -- delays inside the poll loop, the
 * early-return and final return statements are missing from this view. */
9674 static int tg3_test_interrupt(struct tg3 *tp)
9676 struct tg3_napi *tnapi = &tp->napi[0];
9677 struct net_device *dev = tp->dev;
9678 int err, i, intr_ok = 0;
9681 if (!netif_running(dev))
9684 tg3_disable_ints(tp);
9686 free_irq(tnapi->irq_vec, tnapi);
9689 * Turn off MSI one shot mode. Otherwise this test has no
9690 * observable way to know whether the interrupt was delivered.
9692 if (tg3_flag(tp, 57765_PLUS)) {
9693 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9694 tw32(MSGINT_MODE, val);
9697 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9698 IRQF_SHARED, dev->name, tnapi);
9702 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9703 tg3_enable_ints(tp);
/* Kick the host coalescing engine to raise an interrupt now. */
9705 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9708 for (i = 0; i < 5; i++) {
9709 u32 int_mbox, misc_host_ctrl;
9711 int_mbox = tr32_mailbox(tnapi->int_mbox);
9712 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9714 if ((int_mbox != 0) ||
9715 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9720 if (tg3_flag(tp, 57765_PLUS) &&
9721 tnapi->hw_status->status_tag != tnapi->last_tag)
9722 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
/* Tear down the test handler and restore the real one. */
9727 tg3_disable_ints(tp);
9729 free_irq(tnapi->irq_vec, tnapi);
9731 err = tg3_request_irq(tp, 0);
9737 /* Reenable MSI one shot mode. */
9738 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9739 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9740 tw32(MSGINT_MODE, val);
9748 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9749 * successfully restored
/* tg3_test_msi(): run tg3_test_interrupt() with SERR reporting temporarily
 * masked (MSI may terminate with Master Abort). On MSI failure: warn, free
 * the vector, disable MSI, fall back to the legacy INTx vector, then halt
 * and re-init the chip to clear any Master Abort state.
 * NOTE(review): sampled excerpt -- success/other-failure early returns and
 * the final cleanup label are outside this view. */
9751 static int tg3_test_msi(struct tg3 *tp)
9756 if (!tg3_flag(tp, USING_MSI))
9759 /* Turn off SERR reporting in case MSI terminates with Master
9762 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9763 pci_write_config_word(tp->pdev, PCI_COMMAND,
9764 pci_cmd & ~PCI_COMMAND_SERR);
9766 err = tg3_test_interrupt(tp);
9768 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9773 /* other failures */
9777 /* MSI test failed, go back to INTx mode */
9778 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9779 "to INTx mode. Please report this failure to the PCI "
9780 "maintainer and include system chipset information\n");
9782 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9784 pci_disable_msi(tp->pdev);
9786 tg3_flag_clear(tp, USING_MSI);
9787 tp->napi[0].irq_vec = tp->pdev->irq;
9789 err = tg3_request_irq(tp, 0);
9793 /* Need to reset the chip because the MSI cycle may have terminated
9794 * with Master Abort.
9796 tg3_full_lock(tp, 1);
9798 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9799 err = tg3_init_hw(tp, 1);
9801 tg3_full_unlock(tp);
9804 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
/* tg3_request_firmware(): fetch tp->fw_needed via request_firmware() and
 * validate it. Word 2 of the blob is the full load length (including BSS),
 * which must be >= blob size minus the 12-byte header; otherwise release
 * the firmware and fail. On success, clear tp->fw_needed so later opens
 * skip the load.
 * NOTE(review): sampled excerpt -- the error-return statements are outside
 * this view. */
9809 static int tg3_request_firmware(struct tg3 *tp)
9811 const __be32 *fw_data;
9813 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9814 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9819 fw_data = (void *)tp->fw->data;
9821 /* Firmware blob starts with version numbers, followed by
9822 * start address and _full_ length including BSS sections
9823 * (which must be longer than the actual data, of course
9826 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9827 if (tp->fw_len < (tp->fw->size - 12)) {
9828 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9829 tp->fw_len, tp->fw_needed);
9830 release_firmware(tp->fw);
9835 /* We no longer need firmware; we have it. */
9836 tp->fw_needed = NULL;
/* tg3_enable_msix(): try to enable MSI-X with one vector per online CPU
 * plus one (vector 0 handles link/other events), capped at tp->irq_max.
 * On partial grant, retry with the granted count. Maps granted vectors
 * into the NAPI array, sizes the RX queue count, and enables RSS (and TSS
 * with per-queue TX on 5719/5720) when more than one vector is in use.
 * Returns true on success, false when MSI-X could not be set up.
 * NOTE(review): sampled excerpt -- several returns/assignments between the
 * embedded line numbers are missing from this view. */
9840 static bool tg3_enable_msix(struct tg3 *tp)
9843 struct msix_entry msix_ent[tp->irq_max];
9845 tp->irq_cnt = num_online_cpus();
9846 if (tp->irq_cnt > 1) {
9847 /* We want as many rx rings enabled as there are cpus.
9848 * In multiqueue MSI-X mode, the first MSI-X vector
9849 * only deals with link interrupts, etc, so we add
9850 * one to the number of vectors we are requesting.
9852 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9855 for (i = 0; i < tp->irq_max; i++) {
9856 msix_ent[i].entry = i;
9857 msix_ent[i].vector = 0;
9860 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9863 } else if (rc != 0) {
/* Fewer vectors granted than requested: retry with that count. */
9864 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9866 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9871 for (i = 0; i < tp->irq_max; i++)
9872 tp->napi[i].irq_vec = msix_ent[i].vector;
9874 netif_set_real_num_tx_queues(tp->dev, 1);
9875 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9876 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9877 pci_disable_msix(tp->pdev);
9881 if (tp->irq_cnt > 1) {
9882 tg3_flag_set(tp, ENABLE_RSS);
9884 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9885 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9886 tg3_flag_set(tp, ENABLE_TSS);
9887 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
/* tg3_ints_init(): choose and configure the interrupt mode for this open.
 * Refuses MSI/MSI-X on chips without tagged status (warns). Prefers MSI-X,
 * then MSI, else legacy INTx. For message-signaled modes, programs
 * MSGINT_MODE (multivector enable, one-shot disable as needed). In the
 * non-MSI-X case, forces a single vector and single TX/RX queue. */
9894 static void tg3_ints_init(struct tg3 *tp)
9896 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9897 !tg3_flag(tp, TAGGED_STATUS)) {
9898 /* All MSI supporting chips should support tagged
9899 * status. Assert that this is the case.
9901 netdev_warn(tp->dev,
9902 "MSI without TAGGED_STATUS? Not using MSI\n");
9906 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9907 tg3_flag_set(tp, USING_MSIX);
9908 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9909 tg3_flag_set(tp, USING_MSI);
9911 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9912 u32 msi_mode = tr32(MSGINT_MODE);
9913 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9914 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9915 if (!tg3_flag(tp, 1SHOT_MSI))
9916 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9917 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
/* INTx or single-vector MSI: one vector, one queue each way. */
9920 if (!tg3_flag(tp, USING_MSIX)) {
9922 tp->napi[0].irq_vec = tp->pdev->irq;
9923 netif_set_real_num_tx_queues(tp->dev, 1);
9924 netif_set_real_num_rx_queues(tp->dev, 1);
/* tg3_ints_fini(): undo tg3_ints_init(): disable MSI-X or MSI as
 * appropriate and clear the USING_MSI/USING_MSIX/ENABLE_RSS/ENABLE_TSS
 * flags. */
9928 static void tg3_ints_fini(struct tg3 *tp)
9930 if (tg3_flag(tp, USING_MSIX))
9931 pci_disable_msix(tp->pdev);
9932 else if (tg3_flag(tp, USING_MSI))
9933 pci_disable_msi(tp->pdev);
9934 tg3_flag_clear(tp, USING_MSI);
9935 tg3_flag_clear(tp, USING_MSIX);
9936 tg3_flag_clear(tp, ENABLE_RSS);
9937 tg3_flag_clear(tp, ENABLE_TSS);
/* tg3_open(): ndo_open handler. Sequence: load firmware if still needed
 * (5701-A0 requires it; otherwise TSO capability toggles on availability),
 * power the chip up, quiesce interrupts, set up the interrupt mode and RSS
 * indirection, allocate DMA-consistent rings, enable NAPI, request all
 * vectors, init the hardware, MSI self-test, start the timer and TX
 * queues, and re-apply loopback if the feature flag is set. Error paths
 * unwind in reverse (labels are on lines missing from this sampled view).
 * NOTE(review): sampled excerpt -- several returns, gotos and labels are
 * missing between the embedded line numbers. */
9940 static int tg3_open(struct net_device *dev)
9942 struct tg3 *tp = netdev_priv(dev);
9945 if (tp->fw_needed) {
9946 err = tg3_request_firmware(tp);
9947 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9951 netdev_warn(tp->dev, "TSO capability disabled\n");
9952 tg3_flag_clear(tp, TSO_CAPABLE);
9953 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9954 netdev_notice(tp->dev, "TSO capability restored\n");
9955 tg3_flag_set(tp, TSO_CAPABLE);
9959 netif_carrier_off(tp->dev);
9961 err = tg3_power_up(tp);
9965 tg3_full_lock(tp, 0);
9967 tg3_disable_ints(tp);
9968 tg3_flag_clear(tp, INIT_COMPLETE);
9970 tg3_full_unlock(tp);
9973 * Setup interrupts first so we know how
9974 * many NAPI resources to allocate
9978 tg3_rss_check_indir_tbl(tp);
9980 /* The placement of this call is tied
9981 * to the setup and use of Host TX descriptors.
9983 err = tg3_alloc_consistent(tp);
9989 tg3_napi_enable(tp);
9991 for (i = 0; i < tp->irq_cnt; i++) {
9992 struct tg3_napi *tnapi = &tp->napi[i];
9993 err = tg3_request_irq(tp, i);
/* Unwind already-registered vectors on failure. */
9995 for (i--; i >= 0; i--) {
9996 tnapi = &tp->napi[i];
9997 free_irq(tnapi->irq_vec, tnapi);
10003 tg3_full_lock(tp, 0);
10005 err = tg3_init_hw(tp, 1);
10007 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10008 tg3_free_rings(tp);
10011 tg3_full_unlock(tp);
10016 if (tg3_flag(tp, USING_MSI)) {
10017 err = tg3_test_msi(tp);
10020 tg3_full_lock(tp, 0);
10021 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10022 tg3_free_rings(tp);
10023 tg3_full_unlock(tp);
/* Pre-57765 MSI chips need the PCIE one-shot-MSI transaction bit. */
10028 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10029 u32 val = tr32(PCIE_TRANSACTION_CFG);
10031 tw32(PCIE_TRANSACTION_CFG,
10032 val | PCIE_TRANS_CFG_1SHOT_MSI);
10038 tg3_full_lock(tp, 0);
10040 tg3_timer_start(tp);
10041 tg3_flag_set(tp, INIT_COMPLETE);
10042 tg3_enable_ints(tp);
10044 tg3_full_unlock(tp);
10046 netif_tx_start_all_queues(dev);
10049 * Reset loopback feature if it was turned on while the device was down
10050 * make sure that it's installed properly now.
10052 if (dev->features & NETIF_F_LOOPBACK)
10053 tg3_set_loopback(dev, dev->features);
/* --- error unwind labels below (partially visible) --- */
10058 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10059 struct tg3_napi *tnapi = &tp->napi[i];
10060 free_irq(tnapi->irq_vec, tnapi);
10064 tg3_napi_disable(tp);
10066 tg3_free_consistent(tp);
10070 tg3_frob_aux_power(tp, false);
10071 pci_set_power_state(tp->pdev, PCI_D3hot);
/* tg3_close(): ndo_stop handler. Disables NAPI, cancels any pending reset
 * task, stops TX queues and the periodic timer, halts the chip and frees
 * the rings under the full lock, releases all IRQ vectors, clears the
 * previous-stats snapshots, frees DMA-consistent memory, powers the chip
 * down and marks carrier off.
 * NOTE(review): sampled excerpt -- the final return is outside this view. */
10075 static int tg3_close(struct net_device *dev)
10078 struct tg3 *tp = netdev_priv(dev);
10080 tg3_napi_disable(tp);
10081 tg3_reset_task_cancel(tp);
10083 netif_tx_stop_all_queues(dev);
10085 tg3_timer_stop(tp);
10089 tg3_full_lock(tp, 1);
10091 tg3_disable_ints(tp);
10093 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10094 tg3_free_rings(tp);
10095 tg3_flag_clear(tp, INIT_COMPLETE);
10097 tg3_full_unlock(tp);
10099 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10100 struct tg3_napi *tnapi = &tp->napi[i];
10101 free_irq(tnapi->irq_vec, tnapi);
10106 /* Clear stats across close / open calls */
10107 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10108 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10112 tg3_free_consistent(tp);
10114 tg3_power_down(tp);
10116 netif_carrier_off(tp->dev);
/* get_stat64(): combine a high/low 32-bit statistic pair into one u64. */
10121 static inline u64 get_stat64(tg3_stat64_t *val)
10123 return ((u64)val->high << 32) | ((u64)val->low);
/* tg3_calc_crc_errors(): return the cumulative RX CRC error count.
 * On copper 5700/5701 the count comes from the PHY: read MII_TG3_TEST1,
 * re-arm the CRC counter (CRC_EN) and read MII_TG3_RXR_COUNTERS, then
 * accumulate into tp->phy_crc_errors. All other chips use the MAC
 * rx_fcs_errors hardware statistic.
 * NOTE(review): sampled excerpt -- a val reset/guard between the embedded
 * line numbers is missing from this view. */
10126 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10128 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10130 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10131 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10132 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10135 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10136 tg3_writephy(tp, MII_TG3_TEST1,
10137 val | MII_TG3_TEST1_CRC_EN);
10138 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10142 tp->phy_crc_errors += val;
10144 return tp->phy_crc_errors;
10147 return get_stat64(&hw_stats->rx_fcs_errors);
/* ESTAT_ADD(member): set estats->member to the saved pre-close snapshot
 * (old_estats->member) plus the live 64-bit hardware counter; relies on
 * estats/old_estats/hw_stats being in scope at the expansion site. */
10150 #define ESTAT_ADD(member) \
10151 estats->member = old_estats->member + \
10152 get_stat64(&hw_stats->member)
/* tg3_get_estats(): fill the ethtool statistics structure by adding each
 * live hardware counter (tp->hw_stats) to the snapshot preserved across
 * close/open (tp->estats_prev), one ESTAT_ADD per member. Purely
 * mechanical; grouped as RX counters, TX counters, DMA/queue counters,
 * ring/irq counters, and the mbuf low-watermark counter. */
10154 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10156 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10157 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10159 ESTAT_ADD(rx_octets);
10160 ESTAT_ADD(rx_fragments);
10161 ESTAT_ADD(rx_ucast_packets);
10162 ESTAT_ADD(rx_mcast_packets);
10163 ESTAT_ADD(rx_bcast_packets);
10164 ESTAT_ADD(rx_fcs_errors);
10165 ESTAT_ADD(rx_align_errors);
10166 ESTAT_ADD(rx_xon_pause_rcvd);
10167 ESTAT_ADD(rx_xoff_pause_rcvd);
10168 ESTAT_ADD(rx_mac_ctrl_rcvd);
10169 ESTAT_ADD(rx_xoff_entered);
10170 ESTAT_ADD(rx_frame_too_long_errors);
10171 ESTAT_ADD(rx_jabbers);
10172 ESTAT_ADD(rx_undersize_packets);
10173 ESTAT_ADD(rx_in_length_errors);
10174 ESTAT_ADD(rx_out_length_errors);
10175 ESTAT_ADD(rx_64_or_less_octet_packets);
10176 ESTAT_ADD(rx_65_to_127_octet_packets);
10177 ESTAT_ADD(rx_128_to_255_octet_packets);
10178 ESTAT_ADD(rx_256_to_511_octet_packets);
10179 ESTAT_ADD(rx_512_to_1023_octet_packets);
10180 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10181 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10182 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10183 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10184 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10186 ESTAT_ADD(tx_octets);
10187 ESTAT_ADD(tx_collisions);
10188 ESTAT_ADD(tx_xon_sent);
10189 ESTAT_ADD(tx_xoff_sent);
10190 ESTAT_ADD(tx_flow_control);
10191 ESTAT_ADD(tx_mac_errors);
10192 ESTAT_ADD(tx_single_collisions);
10193 ESTAT_ADD(tx_mult_collisions);
10194 ESTAT_ADD(tx_deferred);
10195 ESTAT_ADD(tx_excessive_collisions);
10196 ESTAT_ADD(tx_late_collisions);
10197 ESTAT_ADD(tx_collide_2times);
10198 ESTAT_ADD(tx_collide_3times);
10199 ESTAT_ADD(tx_collide_4times);
10200 ESTAT_ADD(tx_collide_5times);
10201 ESTAT_ADD(tx_collide_6times);
10202 ESTAT_ADD(tx_collide_7times);
10203 ESTAT_ADD(tx_collide_8times);
10204 ESTAT_ADD(tx_collide_9times);
10205 ESTAT_ADD(tx_collide_10times);
10206 ESTAT_ADD(tx_collide_11times);
10207 ESTAT_ADD(tx_collide_12times);
10208 ESTAT_ADD(tx_collide_13times);
10209 ESTAT_ADD(tx_collide_14times);
10210 ESTAT_ADD(tx_collide_15times);
10211 ESTAT_ADD(tx_ucast_packets);
10212 ESTAT_ADD(tx_mcast_packets);
10213 ESTAT_ADD(tx_bcast_packets);
10214 ESTAT_ADD(tx_carrier_sense_errors);
10215 ESTAT_ADD(tx_discards);
10216 ESTAT_ADD(tx_errors);
10218 ESTAT_ADD(dma_writeq_full);
10219 ESTAT_ADD(dma_write_prioq_full);
10220 ESTAT_ADD(rxbds_empty);
10221 ESTAT_ADD(rx_discards);
10222 ESTAT_ADD(rx_errors);
10223 ESTAT_ADD(rx_threshold_hit);
10225 ESTAT_ADD(dma_readq_full);
10226 ESTAT_ADD(dma_read_prioq_full);
10227 ESTAT_ADD(tx_comp_queue_full);
10229 ESTAT_ADD(ring_set_send_prod_index);
10230 ESTAT_ADD(ring_status_update);
10231 ESTAT_ADD(nic_irqs);
10232 ESTAT_ADD(nic_avoided_irqs);
10233 ESTAT_ADD(nic_tx_threshold_hit);
10235 ESTAT_ADD(mbuf_lwm_thresh_hit);
/* tg3_get_nstats(): populate the standard rtnl_link_stats64 structure.
 * Each field is the pre-close snapshot (tp->net_stats_prev) plus the
 * relevant hardware counters, aggregating several hardware statistics per
 * netdev field (e.g. rx_packets = ucast + mcast + bcast). CRC errors come
 * from tg3_calc_crc_errors(); rx/tx drop counts are software-maintained in
 * tp->rx_dropped / tp->tx_dropped. */
10238 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10240 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10241 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10243 stats->rx_packets = old_stats->rx_packets +
10244 get_stat64(&hw_stats->rx_ucast_packets) +
10245 get_stat64(&hw_stats->rx_mcast_packets) +
10246 get_stat64(&hw_stats->rx_bcast_packets);
10248 stats->tx_packets = old_stats->tx_packets +
10249 get_stat64(&hw_stats->tx_ucast_packets) +
10250 get_stat64(&hw_stats->tx_mcast_packets) +
10251 get_stat64(&hw_stats->tx_bcast_packets);
10253 stats->rx_bytes = old_stats->rx_bytes +
10254 get_stat64(&hw_stats->rx_octets);
10255 stats->tx_bytes = old_stats->tx_bytes +
10256 get_stat64(&hw_stats->tx_octets);
10258 stats->rx_errors = old_stats->rx_errors +
10259 get_stat64(&hw_stats->rx_errors);
10260 stats->tx_errors = old_stats->tx_errors +
10261 get_stat64(&hw_stats->tx_errors) +
10262 get_stat64(&hw_stats->tx_mac_errors) +
10263 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10264 get_stat64(&hw_stats->tx_discards);
10266 stats->multicast = old_stats->multicast +
10267 get_stat64(&hw_stats->rx_mcast_packets);
10268 stats->collisions = old_stats->collisions +
10269 get_stat64(&hw_stats->tx_collisions);
10271 stats->rx_length_errors = old_stats->rx_length_errors +
10272 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10273 get_stat64(&hw_stats->rx_undersize_packets);
10275 stats->rx_over_errors = old_stats->rx_over_errors +
10276 get_stat64(&hw_stats->rxbds_empty);
10277 stats->rx_frame_errors = old_stats->rx_frame_errors +
10278 get_stat64(&hw_stats->rx_align_errors);
10279 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10280 get_stat64(&hw_stats->tx_discards);
10281 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10282 get_stat64(&hw_stats->tx_carrier_sense_errors);
10284 stats->rx_crc_errors = old_stats->rx_crc_errors +
10285 tg3_calc_crc_errors(tp);
10287 stats->rx_missed_errors = old_stats->rx_missed_errors +
10288 get_stat64(&hw_stats->rx_discards);
10290 stats->rx_dropped = tp->rx_dropped;
10291 stats->tx_dropped = tp->tx_dropped;
/* tg3_get_regs_len(): ethtool get_regs_len hook -- the register dump is a
 * fixed-size block. */
10294 static int tg3_get_regs_len(struct net_device *dev)
10296 return TG3_REG_BLK_SIZE;
/* tg3_get_regs(): ethtool get_regs hook. Zero the output buffer, bail out
 * while the PHY is in low-power state (registers unreadable), otherwise
 * dump the legacy register block under the full lock. */
10299 static void tg3_get_regs(struct net_device *dev,
10300 struct ethtool_regs *regs, void *_p)
10302 struct tg3 *tp = netdev_priv(dev);
10306 memset(_p, 0, TG3_REG_BLK_SIZE);
10308 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10311 tg3_full_lock(tp, 0);
10313 tg3_dump_legacy_regs(tp, (u32 *)_p);
10315 tg3_full_unlock(tp);
/* tg3_get_eeprom_len(): ethtool get_eeprom_len hook -- report the detected
 * NVRAM size. */
10318 static int tg3_get_eeprom_len(struct net_device *dev)
10320 struct tg3 *tp = netdev_priv(dev);
10322 return tp->nvram_size;
/* tg3_get_eeprom(): ethtool get_eeprom hook. Rejects devices without NVRAM
 * or with the PHY in low-power state. NVRAM is read in big-endian 32-bit
 * words, so the requested [offset, offset+len) range is handled in three
 * phases: an unaligned head (partial word), the aligned middle, and an
 * unaligned tail; eeprom->len tracks bytes actually copied out.
 * NOTE(review): sampled excerpt -- error returns, len bookkeeping and some
 * loop-exit lines are missing between the embedded line numbers. */
10325 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10327 struct tg3 *tp = netdev_priv(dev);
10330 u32 i, offset, len, b_offset, b_count;
10333 if (tg3_flag(tp, NO_NVRAM))
10336 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10339 offset = eeprom->offset;
10343 eeprom->magic = TG3_EEPROM_MAGIC;
10346 /* adjustments to start on required 4 byte boundary */
10347 b_offset = offset & 3;
10348 b_count = 4 - b_offset;
10349 if (b_count > len) {
10350 /* i.e. offset=1 len=2 */
10353 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10356 memcpy(data, ((char *)&val) + b_offset, b_count);
10359 eeprom->len += b_count;
10362 /* read bytes up to the last 4 byte boundary */
10363 pd = &data[eeprom->len];
10364 for (i = 0; i < (len - (len & 3)); i += 4) {
10365 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10370 memcpy(pd + i, &val, 4);
10375 /* read last bytes not ending on 4 byte boundary */
10376 pd = &data[eeprom->len];
10378 b_offset = offset + len - b_count;
10379 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10382 memcpy(pd, &val, b_count);
10383 eeprom->len += b_count;
/* ethtool .set_eeprom hook: write @data into NVRAM.
 *
 * NVRAM writes are word (4-byte) granular, so unaligned edges are
 * handled by read-modify-write: the word containing the unaligned
 * start is read into 'start', the word covering the unaligned end
 * into 'end', then a temporary buffer of the rounded-up length is
 * assembled (start word, end word, user data in between) and written
 * with tg3_nvram_write_block().
 *
 * Rejected when the PHY is in low-power state, when the device has no
 * NVRAM, or when eeprom->magic does not match TG3_EEPROM_MAGIC
 * (error returns elided in this view).
 */
10388 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10390 struct tg3 *tp = netdev_priv(dev);
10392 u32 offset, len, b_offset, odd_len;
10396 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10399 if (tg3_flag(tp, NO_NVRAM) ||
10400 eeprom->magic != TG3_EEPROM_MAGIC)
10403 offset = eeprom->offset;
10406 if ((b_offset = (offset & 3))) {
10407 /* adjustments to start on required 4 byte boundary */
10408 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10419 /* adjustments to end on required 4 byte boundary */
10421 len = (len + 3) & ~3;
10422 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
/* Unaligned start or end: stage the write through a bounce buffer. */
10428 if (b_offset || odd_len) {
10429 buf = kmalloc(len, GFP_KERNEL);
10433 memcpy(buf, &start, 4);
10435 memcpy(buf+len-4, &end, 4);
10436 memcpy(buf + b_offset, data, eeprom->len);
10439 ret = tg3_nvram_write_block(tp, offset, len, buf);
/* ethtool .get_settings hook: report link capabilities and current
 * link state.
 *
 * When phylib manages the PHY (USE_PHYLIB flag) the query is delegated
 * entirely to phy_ethtool_gset(); otherwise the supported/advertised
 * masks are built from the driver's own phy_flags: 1000Mb modes unless
 * the PHY is 10/100-only, TP modes for copper vs. FIBRE for serdes.
 * Pause advertisement bits are derived from the configured flow
 * control direction.  Speed/duplex/MDI-X are reported only while the
 * interface is running with carrier; otherwise UNKNOWN/INVALID
 * sentinels are returned.
 */
10447 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10449 struct tg3 *tp = netdev_priv(dev);
10451 if (tg3_flag(tp, USE_PHYLIB)) {
10452 struct phy_device *phydev;
10453 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10455 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10456 return phy_ethtool_gset(phydev, cmd);
10459 cmd->supported = (SUPPORTED_Autoneg);
10461 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10462 cmd->supported |= (SUPPORTED_1000baseT_Half |
10463 SUPPORTED_1000baseT_Full);
10465 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10466 cmd->supported |= (SUPPORTED_100baseT_Half |
10467 SUPPORTED_100baseT_Full |
10468 SUPPORTED_10baseT_Half |
10469 SUPPORTED_10baseT_Full |
10471 cmd->port = PORT_TP;
10473 cmd->supported |= SUPPORTED_FIBRE;
10474 cmd->port = PORT_FIBRE;
10477 cmd->advertising = tp->link_config.advertising;
10478 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10479 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10480 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10481 cmd->advertising |= ADVERTISED_Pause;
10483 cmd->advertising |= ADVERTISED_Pause |
10484 ADVERTISED_Asym_Pause;
10486 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10487 cmd->advertising |= ADVERTISED_Asym_Pause;
/* Live link state is only meaningful when up and carrier is present. */
10490 if (netif_running(dev) && netif_carrier_ok(dev)) {
10491 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10492 cmd->duplex = tp->link_config.active_duplex;
10493 cmd->lp_advertising = tp->link_config.rmt_adv;
10494 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10495 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10496 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10498 cmd->eth_tp_mdix = ETH_TP_MDI;
10501 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10502 cmd->duplex = DUPLEX_UNKNOWN;
10503 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10505 cmd->phy_address = tp->phy_addr;
10506 cmd->transceiver = XCVR_INTERNAL;
10507 cmd->autoneg = tp->link_config.autoneg;
/* ethtool .set_settings hook: apply a new speed/duplex/autoneg
 * configuration.
 *
 * Delegates to phy_ethtool_sset() when phylib owns the PHY.  Otherwise
 * the request is validated first: autoneg must be ENABLE or DISABLE,
 * forced mode needs a valid duplex, advertised modes must be a subset
 * of what the hardware supports (the mask mirrors the capability logic
 * in tg3_get_settings), and serdes PHYs only accept forced
 * 1000/full.  The accepted configuration is written into
 * tp->link_config under tg3_full_lock, and tg3_setup_phy() re-runs
 * link setup if the interface is up.
 */
10513 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10515 struct tg3 *tp = netdev_priv(dev);
10516 u32 speed = ethtool_cmd_speed(cmd);
10518 if (tg3_flag(tp, USE_PHYLIB)) {
10519 struct phy_device *phydev;
10520 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10522 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10523 return phy_ethtool_sset(phydev, cmd);
10526 if (cmd->autoneg != AUTONEG_ENABLE &&
10527 cmd->autoneg != AUTONEG_DISABLE)
10530 if (cmd->autoneg == AUTONEG_DISABLE &&
10531 cmd->duplex != DUPLEX_FULL &&
10532 cmd->duplex != DUPLEX_HALF)
10535 if (cmd->autoneg == AUTONEG_ENABLE) {
/* Build the mask of modes this device may legally advertise. */
10536 u32 mask = ADVERTISED_Autoneg |
10538 ADVERTISED_Asym_Pause;
10540 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10541 mask |= ADVERTISED_1000baseT_Half |
10542 ADVERTISED_1000baseT_Full;
10544 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10545 mask |= ADVERTISED_100baseT_Half |
10546 ADVERTISED_100baseT_Full |
10547 ADVERTISED_10baseT_Half |
10548 ADVERTISED_10baseT_Full |
10551 mask |= ADVERTISED_FIBRE;
10553 if (cmd->advertising & ~mask)
10556 mask &= (ADVERTISED_1000baseT_Half |
10557 ADVERTISED_1000baseT_Full |
10558 ADVERTISED_100baseT_Half |
10559 ADVERTISED_100baseT_Full |
10560 ADVERTISED_10baseT_Half |
10561 ADVERTISED_10baseT_Full);
10563 cmd->advertising &= mask;
/* Forced mode: serdes links are 1000/full only. */
10565 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10566 if (speed != SPEED_1000)
10569 if (cmd->duplex != DUPLEX_FULL)
10572 if (speed != SPEED_100 &&
10578 tg3_full_lock(tp, 0);
10580 tp->link_config.autoneg = cmd->autoneg;
10581 if (cmd->autoneg == AUTONEG_ENABLE) {
10582 tp->link_config.advertising = (cmd->advertising |
10583 ADVERTISED_Autoneg);
10584 tp->link_config.speed = SPEED_UNKNOWN;
10585 tp->link_config.duplex = DUPLEX_UNKNOWN;
10587 tp->link_config.advertising = 0;
10588 tp->link_config.speed = speed;
10589 tp->link_config.duplex = cmd->duplex;
10592 if (netif_running(dev))
10593 tg3_setup_phy(tp, 1);
10595 tg3_full_unlock(tp);
/* ethtool .get_drvinfo hook: fill in driver name, version, firmware
 * version and PCI bus address.  strlcpy guarantees NUL-termination of
 * each fixed-size field.
 */
10600 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10602 struct tg3 *tp = netdev_priv(dev);
10604 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10605 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10606 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10607 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
/* ethtool .get_wol hook: Wake-on-LAN is limited to magic packets, and
 * only reported as supported/enabled when both the device flag is set
 * and the platform can actually wake the PCI device.
 */
10610 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10612 struct tg3 *tp = netdev_priv(dev);
10614 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10615 wol->supported = WAKE_MAGIC;
10617 wol->supported = 0;
10619 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10620 wol->wolopts = WAKE_MAGIC;
/* No SecureOn password support. */
10621 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool .set_wol hook: only WAKE_MAGIC may be requested, and only
 * when the hardware is WoL-capable and the platform allows wakeup.
 * The request is recorded both in the PM core (device_set_wakeup_enable)
 * and in the driver's own WOL_ENABLE flag, the latter under tp->lock.
 */
10624 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10626 struct tg3 *tp = netdev_priv(dev);
10627 struct device *dp = &tp->pdev->dev;
10629 if (wol->wolopts & ~WAKE_MAGIC)
10631 if ((wol->wolopts & WAKE_MAGIC) &&
10632 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10635 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
/* Mirror the PM-core decision into the driver flag. */
10637 spin_lock_bh(&tp->lock);
10638 if (device_may_wakeup(dp))
10639 tg3_flag_set(tp, WOL_ENABLE);
10641 tg3_flag_clear(tp, WOL_ENABLE);
10642 spin_unlock_bh(&tp->lock);
/* ethtool .get_msglevel hook: return the netif message-enable bitmask. */
10647 static u32 tg3_get_msglevel(struct net_device *dev)
10649 struct tg3 *tp = netdev_priv(dev);
10650 return tp->msg_enable;
/* ethtool .set_msglevel hook: store the new netif message-enable mask. */
10653 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10655 struct tg3 *tp = netdev_priv(dev);
10656 tp->msg_enable = value;
/* ethtool .nway_reset hook: restart autonegotiation.
 *
 * Refused when the interface is down or on PHY-serdes links.  With
 * phylib, phy_start_aneg() does the work.  Otherwise BMCR is read via
 * MII (the first read appears to be a dummy/flush read before the
 * checked one) and ANRESTART is written, but only when autoneg is
 * already enabled or the link came up via parallel detection.
 */
10659 static int tg3_nway_reset(struct net_device *dev)
10661 struct tg3 *tp = netdev_priv(dev);
10664 if (!netif_running(dev))
10667 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10670 if (tg3_flag(tp, USE_PHYLIB)) {
10671 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10673 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10677 spin_lock_bh(&tp->lock);
10679 tg3_readphy(tp, MII_BMCR, &bmcr);
10680 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10681 ((bmcr & BMCR_ANENABLE) ||
10682 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10683 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10687 spin_unlock_bh(&tp->lock);
/* ethtool .get_ringparam hook: report RX std/jumbo and TX ring limits
 * and current sizes.  Jumbo values are only meaningful when the jumbo
 * ring is enabled; all TX queues share napi[0]'s tx_pending setting.
 */
10693 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10695 struct tg3 *tp = netdev_priv(dev);
10697 ering->rx_max_pending = tp->rx_std_ring_mask;
10698 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10699 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10701 ering->rx_jumbo_max_pending = 0;
10703 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10705 ering->rx_pending = tp->rx_pending;
10706 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10707 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10709 ering->rx_jumbo_pending = 0;
10711 ering->tx_pending = tp->napi[0].tx_pending;
/* ethtool .set_ringparam hook: resize the RX/TX rings.
 *
 * Requests are rejected when they exceed the hardware ring masks, or
 * when the TX ring would be too small to hold a maximally-fragmented
 * skb (<= MAX_SKB_FRAGS; tripled margin on TSO_BUG chips).  If the
 * interface is running it is stopped, the new sizes are applied to
 * tp and every napi context under tg3_full_lock, then the hardware is
 * halted and restarted with the new geometry.
 */
10714 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10716 struct tg3 *tp = netdev_priv(dev);
10717 int i, irq_sync = 0, err = 0;
10719 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10720 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10721 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10722 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10723 (tg3_flag(tp, TSO_BUG) &&
10724 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10727 if (netif_running(dev)) {
10729 tg3_netif_stop(tp);
10733 tg3_full_lock(tp, irq_sync);
10735 tp->rx_pending = ering->rx_pending;
/* Some chips cap the standard RX ring at 64 entries. */
10737 if (tg3_flag(tp, MAX_RXPEND_64) &&
10738 tp->rx_pending > 63)
10739 tp->rx_pending = 63;
10740 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10742 for (i = 0; i < tp->irq_max; i++)
10743 tp->napi[i].tx_pending = ering->tx_pending;
10745 if (netif_running(dev)) {
10746 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10747 err = tg3_restart_hw(tp, 1);
10749 tg3_netif_start(tp);
10752 tg3_full_unlock(tp);
10754 if (irq_sync && !err)
/* ethtool .get_pauseparam hook: report pause autoneg and the currently
 * configured RX/TX flow-control directions.
 */
10760 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10762 struct tg3 *tp = netdev_priv(dev);
10764 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10766 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10767 epause->rx_pause = 1;
10769 epause->rx_pause = 0;
10771 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10772 epause->tx_pause = 1;
10774 epause->tx_pause = 0;
/* ethtool .set_pauseparam hook: configure RX/TX flow control.
 *
 * Two distinct paths:
 *  - phylib-managed PHY: validate the request against the PHY's
 *    Pause/Asym_Pause capabilities, translate the rx/tx request into
 *    the standard advertisement encoding (Pause / Pause|Asym / Asym),
 *    update phydev->advertising and, if autoneg is on, renegotiate so
 *    the link partner learns the new settings.
 *  - legacy path: stop the interface, update PAUSE_AUTONEG flag and
 *    flowctrl bits under tg3_full_lock, then halt/restart the hardware.
 */
10777 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10779 struct tg3 *tp = netdev_priv(dev);
10782 if (tg3_flag(tp, USE_PHYLIB)) {
10784 struct phy_device *phydev;
10786 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10788 if (!(phydev->supported & SUPPORTED_Pause) ||
10789 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10790 (epause->rx_pause != epause->tx_pause)))
10793 tp->link_config.flowctrl = 0;
/* Map the rx/tx pause request onto IEEE pause advertisement bits. */
10794 if (epause->rx_pause) {
10795 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10797 if (epause->tx_pause) {
10798 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10799 newadv = ADVERTISED_Pause;
10801 newadv = ADVERTISED_Pause |
10802 ADVERTISED_Asym_Pause;
10803 } else if (epause->tx_pause) {
10804 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10805 newadv = ADVERTISED_Asym_Pause;
10809 if (epause->autoneg)
10810 tg3_flag_set(tp, PAUSE_AUTONEG);
10812 tg3_flag_clear(tp, PAUSE_AUTONEG);
10814 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10815 u32 oldadv = phydev->advertising &
10816 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10817 if (oldadv != newadv) {
10818 phydev->advertising &=
10819 ~(ADVERTISED_Pause |
10820 ADVERTISED_Asym_Pause);
10821 phydev->advertising |= newadv;
10822 if (phydev->autoneg) {
10824 * Always renegotiate the link to
10825 * inform our link partner of our
10826 * flow control settings, even if the
10827 * flow control is forced. Let
10828 * tg3_adjust_link() do the final
10829 * flow control setup.
10831 return phy_start_aneg(phydev);
10835 if (!epause->autoneg)
10836 tg3_setup_flow_control(tp, 0, 0);
10838 tp->link_config.advertising &=
10839 ~(ADVERTISED_Pause |
10840 ADVERTISED_Asym_Pause);
10841 tp->link_config.advertising |= newadv;
/* Legacy (non-phylib) path below. */
10846 if (netif_running(dev)) {
10847 tg3_netif_stop(tp);
10851 tg3_full_lock(tp, irq_sync);
10853 if (epause->autoneg)
10854 tg3_flag_set(tp, PAUSE_AUTONEG);
10856 tg3_flag_clear(tp, PAUSE_AUTONEG);
10857 if (epause->rx_pause)
10858 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10860 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10861 if (epause->tx_pause)
10862 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10864 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10866 if (netif_running(dev)) {
10867 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10868 err = tg3_restart_hw(tp, 1);
10870 tg3_netif_start(tp);
10873 tg3_full_unlock(tp);
/* ethtool .get_sset_count hook: number of strings for the ETH_SS_TEST
 * and ETH_SS_STATS string sets (case labels elided in this view);
 * anything else is unsupported.
 */
10879 static int tg3_get_sset_count(struct net_device *dev, int sset)
10883 return TG3_NUM_TEST;
10885 return TG3_NUM_STATS;
10887 return -EOPNOTSUPP;
/* ethtool .get_rxnfc hook: only ETHTOOL_GRXRINGS is handled, and only
 * on MSI-X capable devices.  Reports the number of RX rings: the live
 * irq_cnt while running, otherwise the online CPU count clamped to
 * TG3_IRQ_MAX_VECS_RSS.
 */
10891 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10892 u32 *rules __always_unused)
10894 struct tg3 *tp = netdev_priv(dev);
10896 if (!tg3_flag(tp, SUPPORT_MSIX))
10897 return -EOPNOTSUPP;
10899 switch (info->cmd) {
10900 case ETHTOOL_GRXRINGS:
10901 if (netif_running(tp->dev))
10902 info->data = tp->irq_cnt;
10904 info->data = num_online_cpus();
10905 if (info->data > TG3_IRQ_MAX_VECS_RSS)
10906 info->data = TG3_IRQ_MAX_VECS_RSS;
10909 /* The first interrupt vector only
10910 * handles link interrupts.
10916 return -EOPNOTSUPP;
/* ethtool .get_rxfh_indir_size hook: RSS indirection table size, or 0
 * (default 'size', elided in this view) when MSI-X/RSS is unavailable.
 */
10920 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10923 struct tg3 *tp = netdev_priv(dev);
10925 if (tg3_flag(tp, SUPPORT_MSIX))
10926 size = TG3_RSS_INDIR_TBL_SIZE;
/* ethtool RSS hook: copy the driver's cached indirection table out. */
10931 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10933 struct tg3 *tp = netdev_priv(dev);
10936 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10937 indir[i] = tp->rss_ind_tbl[i];
/* ethtool RSS hook: install a new indirection table.  The table is
 * always cached in tp->rss_ind_tbl; it is pushed to hardware only when
 * the interface is running with RSS enabled.
 */
10942 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10944 struct tg3 *tp = netdev_priv(dev);
10947 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10948 tp->rss_ind_tbl[i] = indir[i];
10950 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10953 /* It is legal to write the indirection
10954 * table while the device is running.
10956 tg3_full_lock(tp, 0);
10957 tg3_rss_write_indir_tbl(tp);
10958 tg3_full_unlock(tp);
10963 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10965 switch (stringset) {
10967 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
10970 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
10973 WARN_ON(1); /* we need a WARN() */
/* ethtool .set_phys_id hook: blink the port LEDs so an operator can
 * locate the NIC.  Returns 1 for ETHTOOL_ID_ACTIVE so the core calls
 * back once per second; ON/OFF force the LEDs via MAC_LED_CTRL
 * override bits, and INACTIVE restores the saved tp->led_ctrl value.
 * Requires the interface to be up (early-return body elided here).
 */
10978 static int tg3_set_phys_id(struct net_device *dev,
10979 enum ethtool_phys_id_state state)
10981 struct tg3 *tp = netdev_priv(dev);
10983 if (!netif_running(tp->dev))
10987 case ETHTOOL_ID_ACTIVE:
10988 return 1;	/* cycle on/off once per second */
10990 case ETHTOOL_ID_ON:
10991 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10992 LED_CTRL_1000MBPS_ON |
10993 LED_CTRL_100MBPS_ON |
10994 LED_CTRL_10MBPS_ON |
10995 LED_CTRL_TRAFFIC_OVERRIDE |
10996 LED_CTRL_TRAFFIC_BLINK |
10997 LED_CTRL_TRAFFIC_LED);
11000 case ETHTOOL_ID_OFF:
11001 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11002 LED_CTRL_TRAFFIC_OVERRIDE);
11005 case ETHTOOL_ID_INACTIVE:
/* Restore whatever LED configuration the driver normally uses. */
11006 tw32(MAC_LED_CTRL, tp->led_ctrl);
/* ethtool .get_ethtool_stats hook: fill the stats array from hardware
 * via tg3_get_estats(); the alternate path (condition elided in this
 * view -- presumably when hw_stats is unavailable) zeroes the buffer
 * instead.
 */
11013 static void tg3_get_ethtool_stats(struct net_device *dev,
11014 struct ethtool_stats *estats, u64 *tmp_stats)
11016 struct tg3 *tp = netdev_priv(dev);
11019 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11021 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
/* Read the VPD (Vital Product Data) block out of NVRAM into a freshly
 * kmalloc'd buffer; *vpdlen receives its length.  Caller owns and must
 * kfree() the returned buffer.  Returns NULL on failure (elided here).
 *
 * Location of the VPD: on TG3_EEPROM_MAGIC parts the NVRAM directory
 * is scanned for an EXTVPD entry, whose length and (logical) address
 * are used; otherwise, or when no entry is found, the fixed
 * TG3_NVM_VPD_OFF/TG3_NVM_VPD_LEN window is used.  The data itself is
 * then read either word-by-word from NVRAM (magic parts) or via
 * pci_read_vpd() through config space, retrying up to 3 partial reads.
 */
11024 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11028 u32 offset = 0, len = 0;
11031 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11034 if (magic == TG3_EEPROM_MAGIC) {
11035 for (offset = TG3_NVM_DIR_START;
11036 offset < TG3_NVM_DIR_END;
11037 offset += TG3_NVM_DIRENT_SIZE) {
11038 if (tg3_nvram_read(tp, offset, &val))
11041 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11042 TG3_NVM_DIRTYPE_EXTVPD)
11046 if (offset != TG3_NVM_DIR_END) {
11047 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11048 if (tg3_nvram_read(tp, offset + 4, &offset))
11051 offset = tg3_nvram_logical_addr(tp, offset);
11055 if (!offset || !len) {
11056 offset = TG3_NVM_VPD_OFF;
11057 len = TG3_NVM_VPD_LEN;
11060 buf = kmalloc(len, GFP_KERNEL);
11064 if (magic == TG3_EEPROM_MAGIC) {
11065 for (i = 0; i < len; i += 4) {
11066 /* The data is in little-endian format in NVRAM.
11067 * Use the big-endian read routines to preserve
11068 * the byte order as it exists in NVRAM.
11070 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11076 unsigned int pos = 0;
11078 ptr = (u8 *)&buf[0];
11079 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11080 cnt = pci_read_vpd(tp->pdev, pos,
/* Timeouts/interrupts from config-space VPD reads abort the loop. */
11082 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11100 #define NVRAM_TEST_SIZE 0x100
11101 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11102 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11103 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11104 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11105 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11106 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11107 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11108 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
/* Self-test: verify NVRAM contents.
 *
 * The image format is identified from the magic word at offset 0 and
 * the expected test size chosen accordingly (standard EEPROM image,
 * one of several selfboot format-1 revisions, or the hardware selfboot
 * format).  The image is read into a buffer of big-endian words, then
 * validated by the scheme matching its format:
 *  - selfboot FW images: 8-bit additive checksum over the image
 *    (revision 2 skips the MBA word at TG3_EEPROM_SB_F1R2_MBA_OFF);
 *  - hardware selfboot images: per-byte parity bits packed at offsets
 *    0, 8 and 16 are separated from the data bytes and each data
 *    byte's popcount parity is checked against its stored parity bit;
 *  - standard images: CRC over the bootstrap area (first 0x10 bytes,
 *    checksum stored at 0x10) and over the manufacturing block
 *    (0x88 bytes at 0x74, checksum stored at 0xfc), followed by a
 *    check of the VPD block's RO-section checksum keyword, whose
 *    additive sum over the covered bytes must come out to zero.
 * Returns 0 on success; error returns are elided in this view.
 */
11110 static int tg3_test_nvram(struct tg3 *tp)
11112 u32 csum, magic, len;
11114 int i, j, k, err = 0, size;
11116 if (tg3_flag(tp, NO_NVRAM))
11119 if (tg3_nvram_read(tp, 0, &magic) != 0)
11122 if (magic == TG3_EEPROM_MAGIC)
11123 size = NVRAM_TEST_SIZE;
11124 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11125 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11126 TG3_EEPROM_SB_FORMAT_1) {
11127 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11128 case TG3_EEPROM_SB_REVISION_0:
11129 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11131 case TG3_EEPROM_SB_REVISION_2:
11132 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11134 case TG3_EEPROM_SB_REVISION_3:
11135 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11137 case TG3_EEPROM_SB_REVISION_4:
11138 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11140 case TG3_EEPROM_SB_REVISION_5:
11141 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11143 case TG3_EEPROM_SB_REVISION_6:
11144 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11151 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11152 size = NVRAM_SELFBOOT_HW_SIZE;
11156 buf = kmalloc(size, GFP_KERNEL);
/* Pull the whole test image into memory as big-endian 32-bit words. */
11161 for (i = 0, j = 0; i < size; i += 4, j++) {
11162 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11169 /* Selfboot format */
11170 magic = be32_to_cpu(buf[0]);
11171 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11172 TG3_EEPROM_MAGIC_FW) {
11173 u8 *buf8 = (u8 *) buf, csum8 = 0;
11175 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11176 TG3_EEPROM_SB_REVISION_2) {
11177 /* For rev 2, the csum doesn't include the MBA. */
11178 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11180 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11183 for (i = 0; i < size; i++)
11196 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11197 TG3_EEPROM_MAGIC_HW) {
11198 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11199 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11200 u8 *buf8 = (u8 *) buf;
11202 /* Separate the parity bits and the data bytes. */
11203 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11204 if ((i == 0) || (i == 8)) {
11208 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11209 parity[k++] = buf8[i] & msk;
11211 } else if (i == 16) {
11215 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11216 parity[k++] = buf8[i] & msk;
11219 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11220 parity[k++] = buf8[i] & msk;
11223 data[j++] = buf8[i];
/* Each data byte's popcount parity must match its stored parity bit. */
11227 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11228 u8 hw8 = hweight8(data[i]);
11230 if ((hw8 & 0x1) && parity[i])
11232 else if (!(hw8 & 0x1) && !parity[i])
11241 /* Bootstrap checksum at offset 0x10 */
11242 csum = calc_crc((unsigned char *) buf, 0x10);
11243 if (csum != le32_to_cpu(buf[0x10/4]))
11246 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11247 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11248 if (csum != le32_to_cpu(buf[0xfc/4]))
11253 buf = tg3_vpd_readblock(tp, &len);
11257 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11259 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11263 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11266 i += PCI_VPD_LRDT_TAG_SIZE;
11267 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11268 PCI_VPD_RO_KEYWORD_CHKSUM);
11272 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11274 for (i = 0; i <= j; i++)
11275 csum8 += ((u8 *)buf)[i];
11289 #define TG3_SERDES_TIMEOUT_SEC 2
11290 #define TG3_COPPER_TIMEOUT_SEC 6
/* Self-test: wait for link-up.  Polls netif_carrier_ok() once per
 * second, for up to 2s on serdes links or 6s on copper, and also bails
 * out if the sleep is interrupted by a signal.  Requires the interface
 * to be running (early-return body elided in this view).
 */
11292 static int tg3_test_link(struct tg3 *tp)
11296 if (!netif_running(tp->dev))
11299 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11300 max = TG3_SERDES_TIMEOUT_SEC;
11302 max = TG3_COPPER_TIMEOUT_SEC;
11304 for (i = 0; i < max; i++) {
11305 if (netif_carrier_ok(tp->dev))
11308 if (msleep_interruptible(1000))
11315 /* Only test the commonly used registers */
/* Self-test: exercise read-only and read/write register bits.
 *
 * reg_tbl lists, per register: applicability flags (5705-only,
 * not-5705, not-5788, not-5750), a read_mask of bits that must hold
 * their value (read-only), and a write_mask of bits that must accept
 * writes.  For each applicable entry the test saves the register,
 * writes 0 and then read_mask|write_mask, verifying after each write
 * that read-only bits are unchanged and writable bits took the value,
 * and finally restores the saved value.  On failure the offending
 * offset is logged (when netif_msg_hw) and the register restored.
 */
11316 static int tg3_test_registers(struct tg3 *tp)
11318 int i, is_5705, is_5750;
11319 u32 offset, read_mask, write_mask, val, save_val, read_val;
11323 #define TG3_FL_5705	0x1
11324 #define TG3_FL_NOT_5705	0x2
11325 #define TG3_FL_NOT_5788	0x4
11326 #define TG3_FL_NOT_5750	0x8
11330 /* MAC Control Registers */
11331 { MAC_MODE, TG3_FL_NOT_5705,
11332 0x00000000, 0x00ef6f8c },
11333 { MAC_MODE, TG3_FL_5705,
11334 0x00000000, 0x01ef6b8c },
11335 { MAC_STATUS, TG3_FL_NOT_5705,
11336 0x03800107, 0x00000000 },
11337 { MAC_STATUS, TG3_FL_5705,
11338 0x03800100, 0x00000000 },
11339 { MAC_ADDR_0_HIGH, 0x0000,
11340 0x00000000, 0x0000ffff },
11341 { MAC_ADDR_0_LOW, 0x0000,
11342 0x00000000, 0xffffffff },
11343 { MAC_RX_MTU_SIZE, 0x0000,
11344 0x00000000, 0x0000ffff },
11345 { MAC_TX_MODE, 0x0000,
11346 0x00000000, 0x00000070 },
11347 { MAC_TX_LENGTHS, 0x0000,
11348 0x00000000, 0x00003fff },
11349 { MAC_RX_MODE, TG3_FL_NOT_5705,
11350 0x00000000, 0x000007fc },
11351 { MAC_RX_MODE, TG3_FL_5705,
11352 0x00000000, 0x000007dc },
11353 { MAC_HASH_REG_0, 0x0000,
11354 0x00000000, 0xffffffff },
11355 { MAC_HASH_REG_1, 0x0000,
11356 0x00000000, 0xffffffff },
11357 { MAC_HASH_REG_2, 0x0000,
11358 0x00000000, 0xffffffff },
11359 { MAC_HASH_REG_3, 0x0000,
11360 0x00000000, 0xffffffff },
11362 /* Receive Data and Receive BD Initiator Control Registers. */
11363 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11364 0x00000000, 0xffffffff },
11365 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11366 0x00000000, 0xffffffff },
11367 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11368 0x00000000, 0x00000003 },
11369 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11370 0x00000000, 0xffffffff },
11371 { RCVDBDI_STD_BD+0, 0x0000,
11372 0x00000000, 0xffffffff },
11373 { RCVDBDI_STD_BD+4, 0x0000,
11374 0x00000000, 0xffffffff },
11375 { RCVDBDI_STD_BD+8, 0x0000,
11376 0x00000000, 0xffff0002 },
11377 { RCVDBDI_STD_BD+0xc, 0x0000,
11378 0x00000000, 0xffffffff },
11380 /* Receive BD Initiator Control Registers. */
11381 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11382 0x00000000, 0xffffffff },
11383 { RCVBDI_STD_THRESH, TG3_FL_5705,
11384 0x00000000, 0x000003ff },
11385 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11386 0x00000000, 0xffffffff },
11388 /* Host Coalescing Control Registers. */
11389 { HOSTCC_MODE, TG3_FL_NOT_5705,
11390 0x00000000, 0x00000004 },
11391 { HOSTCC_MODE, TG3_FL_5705,
11392 0x00000000, 0x000000f6 },
11393 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11394 0x00000000, 0xffffffff },
11395 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11396 0x00000000, 0x000003ff },
11397 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11398 0x00000000, 0xffffffff },
11399 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11400 0x00000000, 0x000003ff },
11401 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11402 0x00000000, 0xffffffff },
11403 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11404 0x00000000, 0x000000ff },
11405 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11406 0x00000000, 0xffffffff },
11407 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11408 0x00000000, 0x000000ff },
11409 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11410 0x00000000, 0xffffffff },
11411 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11412 0x00000000, 0xffffffff },
11413 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11414 0x00000000, 0xffffffff },
11415 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11416 0x00000000, 0x000000ff },
11417 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11418 0x00000000, 0xffffffff },
11419 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11420 0x00000000, 0x000000ff },
11421 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11422 0x00000000, 0xffffffff },
11423 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11424 0x00000000, 0xffffffff },
11425 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11426 0x00000000, 0xffffffff },
11427 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11428 0x00000000, 0xffffffff },
11429 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11430 0x00000000, 0xffffffff },
11431 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11432 0xffffffff, 0x00000000 },
11433 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11434 0xffffffff, 0x00000000 },
11436 /* Buffer Manager Control Registers. */
11437 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11438 0x00000000, 0x007fff80 },
11439 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11440 0x00000000, 0x007fffff },
11441 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11442 0x00000000, 0x0000003f },
11443 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11444 0x00000000, 0x000001ff },
11445 { BUFMGR_MB_HIGH_WATER, 0x0000,
11446 0x00000000, 0x000001ff },
11447 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11448 0xffffffff, 0x00000000 },
11449 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11450 0xffffffff, 0x00000000 },
11452 /* Mailbox Registers */
11453 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11454 0x00000000, 0x000001ff },
11455 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11456 0x00000000, 0x000001ff },
11457 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11458 0x00000000, 0x000007ff },
11459 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11460 0x00000000, 0x000001ff },
/* Sentinel: offset 0xffff terminates the table scan below. */
11462 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11465 is_5705 = is_5750 = 0;
11466 if (tg3_flag(tp, 5705_PLUS)) {
11468 if (tg3_flag(tp, 5750_PLUS))
11472 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11473 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11476 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11479 if (tg3_flag(tp, IS_5788) &&
11480 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11483 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11486 offset = (u32) reg_tbl[i].offset;
11487 read_mask = reg_tbl[i].read_mask;
11488 write_mask = reg_tbl[i].write_mask;
11490 /* Save the original register content */
11491 save_val = tr32(offset);
11493 /* Determine the read-only value. */
11494 read_val = save_val & read_mask;
11496 /* Write zero to the register, then make sure the read-only bits
11497 * are not changed and the read/write bits are all zeros.
11501 val = tr32(offset);
11503 /* Test the read-only and read/write bits. */
11504 if (((val & read_mask) != read_val) || (val & write_mask))
11507 /* Write ones to all the bits defined by RdMask and WrMask, then
11508 * make sure the read-only bits are not changed and the
11509 * read/write bits are all ones.
11511 tw32(offset, read_mask | write_mask);
11513 val = tr32(offset);
11515 /* Test the read-only bits. */
11516 if ((val & read_mask) != read_val)
11519 /* Test the read/write bits. */
11520 if ((val & write_mask) != write_mask)
/* Put the register back the way we found it. */
11523 tw32(offset, save_val);
11529 if (netif_msg_hw(tp))
11530 netdev_err(tp->dev,
11531 "Register test failed at offset %x\n", offset);
11532 tw32(offset, save_val);
/* Write each test pattern (all-zeros, all-ones, alternating) to every
 * 32-bit word of the given internal-memory window and read it back;
 * any mismatch fails the test (error return elided in this view).
 */
11536 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11538 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11542 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11543 for (j = 0; j < len; j += 4) {
11546 tg3_write_mem(tp, offset + j, test_pattern[i]);
11547 tg3_read_mem(tp, offset + j, &val);
11548 if (val != test_pattern[i])
/* Self-test: run tg3_do_mem_test over every internal memory region of
 * the chip.  The {offset, len} region table is chosen by chip family
 * (5717+, 57765-class, 5755+, 5906, 5705+, or original 570x), with a
 * {0xffffffff, 0} sentinel terminating each table.
 */
11555 static int tg3_test_memory(struct tg3 *tp)
11557 static struct mem_entry {
11560 } mem_tbl_570x[] = {
11561 { 0x00000000, 0x00b50},
11562 { 0x00002000, 0x1c000},
11563 { 0xffffffff, 0x00000}
11564 }, mem_tbl_5705[] = {
11565 { 0x00000100, 0x0000c},
11566 { 0x00000200, 0x00008},
11567 { 0x00004000, 0x00800},
11568 { 0x00006000, 0x01000},
11569 { 0x00008000, 0x02000},
11570 { 0x00010000, 0x0e000},
11571 { 0xffffffff, 0x00000}
11572 }, mem_tbl_5755[] = {
11573 { 0x00000200, 0x00008},
11574 { 0x00004000, 0x00800},
11575 { 0x00006000, 0x00800},
11576 { 0x00008000, 0x02000},
11577 { 0x00010000, 0x0c000},
11578 { 0xffffffff, 0x00000}
11579 }, mem_tbl_5906[] = {
11580 { 0x00000200, 0x00008},
11581 { 0x00004000, 0x00400},
11582 { 0x00006000, 0x00400},
11583 { 0x00008000, 0x01000},
11584 { 0x00010000, 0x01000},
11585 { 0xffffffff, 0x00000}
11586 }, mem_tbl_5717[] = {
11587 { 0x00000200, 0x00008},
11588 { 0x00010000, 0x0a000},
11589 { 0x00020000, 0x13c00},
11590 { 0xffffffff, 0x00000}
11591 }, mem_tbl_57765[] = {
11592 { 0x00000200, 0x00008},
11593 { 0x00004000, 0x00800},
11594 { 0x00006000, 0x09800},
11595 { 0x00010000, 0x0a000},
11596 { 0xffffffff, 0x00000}
11598 struct mem_entry *mem_tbl;
/* Pick the region table for this chip generation (newest first). */
11602 if (tg3_flag(tp, 5717_PLUS))
11603 mem_tbl = mem_tbl_5717;
11604 else if (tg3_flag(tp, 57765_CLASS))
11605 mem_tbl = mem_tbl_57765;
11606 else if (tg3_flag(tp, 5755_PLUS))
11607 mem_tbl = mem_tbl_5755;
11608 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11609 mem_tbl = mem_tbl_5906;
11610 else if (tg3_flag(tp, 5705_PLUS))
11611 mem_tbl = mem_tbl_5705;
11613 mem_tbl = mem_tbl_570x;
11615 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11616 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11624 #define TG3_TSO_MSS 500
11626 #define TG3_TSO_IP_HDR_LEN 20
11627 #define TG3_TSO_TCP_HDR_LEN 20
11628 #define TG3_TSO_TCP_OPT_LEN 12
/* Canned IPv4 + TCP header template used by the TSO loopback self-test
 * (tg3_run_loopback copies it in after the Ethernet addresses).  The
 * first byte 0x45 is an IPv4 version/IHL field; the layout appears to
 * be a 20-byte IP header, 20-byte TCP header and 12 bytes of TCP
 * options, matching TG3_TSO_IP_HDR_LEN / TG3_TSO_TCP_HDR_LEN /
 * TG3_TSO_TCP_OPT_LEN above -- confirm against the full source.
 */
11630 static const u8 tg3_tso_header[] = {
11632 0x45, 0x00, 0x00, 0x00,
11633 0x00, 0x00, 0x40, 0x00,
11634 0x40, 0x06, 0x00, 0x00,
11635 0x0a, 0x00, 0x00, 0x01,
11636 0x0a, 0x00, 0x00, 0x02,
11637 0x0d, 0x00, 0xe0, 0x00,
11638 0x00, 0x00, 0x01, 0x00,
11639 0x00, 0x00, 0x02, 0x00,
11640 0x80, 0x10, 0x10, 0x00,
11641 0x14, 0x09, 0x00, 0x00,
11642 0x01, 0x01, 0x08, 0x0a,
11643 0x11, 0x11, 0x11, 0x11,
11644 0x11, 0x11, 0x11, 0x11,
11647 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11649 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11650 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11652 struct sk_buff *skb;
11653 u8 *tx_data, *rx_data;
11655 int num_pkts, tx_len, rx_len, i, err;
11656 struct tg3_rx_buffer_desc *desc;
11657 struct tg3_napi *tnapi, *rnapi;
11658 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11660 tnapi = &tp->napi[0];
11661 rnapi = &tp->napi[0];
11662 if (tp->irq_cnt > 1) {
11663 if (tg3_flag(tp, ENABLE_RSS))
11664 rnapi = &tp->napi[1];
11665 if (tg3_flag(tp, ENABLE_TSS))
11666 tnapi = &tp->napi[1];
11668 coal_now = tnapi->coal_now | rnapi->coal_now;
11673 skb = netdev_alloc_skb(tp->dev, tx_len);
11677 tx_data = skb_put(skb, tx_len);
11678 memcpy(tx_data, tp->dev->dev_addr, 6);
11679 memset(tx_data + 6, 0x0, 8);
11681 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11683 if (tso_loopback) {
11684 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11686 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11687 TG3_TSO_TCP_OPT_LEN;
11689 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11690 sizeof(tg3_tso_header));
11693 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11694 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11696 /* Set the total length field in the IP header */
11697 iph->tot_len = htons((u16)(mss + hdr_len));
11699 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11700 TXD_FLAG_CPU_POST_DMA);
11702 if (tg3_flag(tp, HW_TSO_1) ||
11703 tg3_flag(tp, HW_TSO_2) ||
11704 tg3_flag(tp, HW_TSO_3)) {
11706 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11707 th = (struct tcphdr *)&tx_data[val];
11710 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11712 if (tg3_flag(tp, HW_TSO_3)) {
11713 mss |= (hdr_len & 0xc) << 12;
11714 if (hdr_len & 0x10)
11715 base_flags |= 0x00000010;
11716 base_flags |= (hdr_len & 0x3e0) << 5;
11717 } else if (tg3_flag(tp, HW_TSO_2))
11718 mss |= hdr_len << 9;
11719 else if (tg3_flag(tp, HW_TSO_1) ||
11720 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11721 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11723 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11726 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11729 data_off = ETH_HLEN;
11731 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11732 tx_len > VLAN_ETH_FRAME_LEN)
11733 base_flags |= TXD_FLAG_JMB_PKT;
11736 for (i = data_off; i < tx_len; i++)
11737 tx_data[i] = (u8) (i & 0xff);
11739 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11740 if (pci_dma_mapping_error(tp->pdev, map)) {
11741 dev_kfree_skb(skb);
11745 val = tnapi->tx_prod;
11746 tnapi->tx_buffers[val].skb = skb;
11747 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11749 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11754 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11756 budget = tg3_tx_avail(tnapi);
11757 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11758 base_flags | TXD_FLAG_END, mss, 0)) {
11759 tnapi->tx_buffers[val].skb = NULL;
11760 dev_kfree_skb(skb);
11766 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11767 tr32_mailbox(tnapi->prodmbox);
11771 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11772 for (i = 0; i < 35; i++) {
11773 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11778 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11779 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11780 if ((tx_idx == tnapi->tx_prod) &&
11781 (rx_idx == (rx_start_idx + num_pkts)))
11785 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11786 dev_kfree_skb(skb);
11788 if (tx_idx != tnapi->tx_prod)
11791 if (rx_idx != rx_start_idx + num_pkts)
11795 while (rx_idx != rx_start_idx) {
11796 desc = &rnapi->rx_rcb[rx_start_idx++];
11797 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11798 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11800 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11801 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11804 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11807 if (!tso_loopback) {
11808 if (rx_len != tx_len)
11811 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11812 if (opaque_key != RXD_OPAQUE_RING_STD)
11815 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11818 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11819 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11820 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11824 if (opaque_key == RXD_OPAQUE_RING_STD) {
11825 rx_data = tpr->rx_std_buffers[desc_idx].data;
11826 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11828 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11829 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11830 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11835 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11836 PCI_DMA_FROMDEVICE);
11838 rx_data += TG3_RX_OFFSET(tp);
11839 for (i = data_off; i < rx_len; i++, val++) {
11840 if (*(rx_data + i) != (u8) (val & 0xff))
11847 /* tg3_free_rings will unmap and free the rx_data */
/* Per-mode failure bits reported in the ethtool self-test 'data' slots.
 * Each loopback pass (standard frame, jumbo frame, TSO frame) sets its
 * own bit; TG3_LOOPBACK_FAILED ORs all three together and is used to
 * mark an entire loopback mode as failed (e.g. interface down or
 * hardware reset failure).
 */
11852 #define TG3_STD_LOOPBACK_FAILED 1
11853 #define TG3_JMB_LOOPBACK_FAILED 2
11854 #define TG3_TSO_LOOPBACK_FAILED 4
11855 #define TG3_LOOPBACK_FAILED \
11856 (TG3_STD_LOOPBACK_FAILED | \
11857 TG3_JMB_LOOPBACK_FAILED | \
11858 TG3_TSO_LOOPBACK_FAILED)
/* Run the ethtool loopback self-tests.
 *
 * Results are accumulated as TG3_*_LOOPBACK_FAILED bits into three slots:
 *   data[0] - MAC loopback
 *   data[1] - internal PHY loopback
 *   data[2] - external PHY loopback (only when do_extlpbk is set)
 *
 * EEE capability is masked off for the duration of the test and restored
 * before returning.  Returns 0 on success, -EIO if any test failed.
 */
11860 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11864 u32 jmb_pkt_sz = 9000;
11867 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11869 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11870 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
/* Device not up: mark every mode completely failed. */
11872 if (!netif_running(tp->dev)) {
11873 data[0] = TG3_LOOPBACK_FAILED;
11874 data[1] = TG3_LOOPBACK_FAILED;
11876 data[2] = TG3_LOOPBACK_FAILED;
/* Full chip reset before testing; on failure all modes are failed. */
11880 err = tg3_reset_hw(tp, 1);
11882 data[0] = TG3_LOOPBACK_FAILED;
11883 data[1] = TG3_LOOPBACK_FAILED;
11885 data[2] = TG3_LOOPBACK_FAILED;
11889 if (tg3_flag(tp, ENABLE_RSS)) {
11892 /* Reroute all rx packets to the 1st queue */
11893 for (i = MAC_RSS_INDIR_TBL_0;
11894 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11898 /* HW errata - mac loopback fails in some cases on 5780.
11899 * Normal traffic and PHY loopback are not affected by
11900 * errata. Also, the MAC loopback test is deprecated for
11901 * all newer ASIC revisions.
11903 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11904 !tg3_flag(tp, CPMU_PRESENT)) {
11905 tg3_mac_loopback(tp, true);
11907 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11908 data[0] |= TG3_STD_LOOPBACK_FAILED;
11910 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11911 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11912 data[0] |= TG3_JMB_LOOPBACK_FAILED;
11914 tg3_mac_loopback(tp, false);
/* Internal PHY loopback is only run for copper PHYs driven directly
 * by this driver (not via phylib and not SERDES).
 */
11917 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11918 !tg3_flag(tp, USE_PHYLIB)) {
11921 tg3_phy_lpbk_set(tp, 0, false);
11923 /* Wait for link */
11924 for (i = 0; i < 100; i++) {
11925 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11930 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11931 data[1] |= TG3_STD_LOOPBACK_FAILED;
11932 if (tg3_flag(tp, TSO_CAPABLE) &&
11933 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11934 data[1] |= TG3_TSO_LOOPBACK_FAILED;
11935 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11936 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11937 data[1] |= TG3_JMB_LOOPBACK_FAILED;
/* External loopback pass (do_extlpbk): same three frame types. */
11940 tg3_phy_lpbk_set(tp, 0, true);
11942 /* All link indications report up, but the hardware
11943 * isn't really ready for about 20 msec. Double it
11948 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11949 data[2] |= TG3_STD_LOOPBACK_FAILED;
11950 if (tg3_flag(tp, TSO_CAPABLE) &&
11951 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11952 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11953 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11954 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11955 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11958 /* Re-enable gphy autopowerdown. */
11959 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11960 tg3_phy_toggle_apd(tp, true);
11963 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
/* Restore the EEE capability bit saved at entry. */
11966 tp->phy_flags |= eee_cap;
/* ethtool .self_test handler.
 *
 * Fills 'data' with per-test results and sets ETH_TEST_FL_FAILED in
 * etest->flags on any failure.  Online tests: NVRAM checksum and link.
 * Offline tests (ETH_TEST_FL_OFFLINE) additionally halt the chip and run
 * register, memory, loopback and interrupt tests, then restore the
 * previous hardware state.  A device found in low-power state is powered
 * up first and powered back down at the end.
 */
11971 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11974 struct tg3 *tp = netdev_priv(dev);
11975 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
/* Cannot test a device we failed to bring out of low power. */
11977 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11978 tg3_power_up(tp)) {
11979 etest->flags |= ETH_TEST_FL_FAILED;
11980 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11984 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11986 if (tg3_test_nvram(tp) != 0) {
11987 etest->flags |= ETH_TEST_FL_FAILED;
/* Link test is skipped for external loopback (link is looped). */
11990 if (!doextlpbk && tg3_test_link(tp)) {
11991 etest->flags |= ETH_TEST_FL_FAILED;
11994 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11995 int err, err2 = 0, irq_sync = 0;
11997 if (netif_running(dev)) {
11999 tg3_netif_stop(tp);
12003 tg3_full_lock(tp, irq_sync);
/* Quiesce the chip before destructive register/memory tests. */
12005 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12006 err = tg3_nvram_lock(tp);
12007 tg3_halt_cpu(tp, RX_CPU_BASE);
12008 if (!tg3_flag(tp, 5705_PLUS))
12009 tg3_halt_cpu(tp, TX_CPU_BASE);
12011 tg3_nvram_unlock(tp);
12013 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12016 if (tg3_test_registers(tp) != 0) {
12017 etest->flags |= ETH_TEST_FL_FAILED;
12021 if (tg3_test_memory(tp) != 0) {
12022 etest->flags |= ETH_TEST_FL_FAILED;
12027 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12029 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12030 etest->flags |= ETH_TEST_FL_FAILED;
/* Interrupt test runs without the full lock held. */
12032 tg3_full_unlock(tp);
12034 if (tg3_test_interrupt(tp) != 0) {
12035 etest->flags |= ETH_TEST_FL_FAILED;
12039 tg3_full_lock(tp, 0);
/* Restore normal operation after the offline tests. */
12041 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12042 if (netif_running(dev)) {
12043 tg3_flag_set(tp, INIT_COMPLETE);
12044 err2 = tg3_restart_hw(tp, 1);
12046 tg3_netif_start(tp);
12049 tg3_full_unlock(tp);
12051 if (irq_sync && !err2)
12054 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12055 tg3_power_down(tp);
/* .ndo_do_ioctl handler: MII register access via SIOCGMIIPHY /
 * SIOCGMIIREG / SIOCSMIIREG.  When phylib manages the PHY the request is
 * forwarded to phy_mii_ioctl(); SERDES devices have no MII PHY to talk
 * to.  PHY reads/writes are serialized under tp->lock.
 */
12059 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12061 struct mii_ioctl_data *data = if_mii(ifr);
12062 struct tg3 *tp = netdev_priv(dev);
12065 if (tg3_flag(tp, USE_PHYLIB)) {
12066 struct phy_device *phydev;
12067 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12069 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12070 return phy_mii_ioctl(phydev, ifr, cmd);
12075 data->phy_id = tp->phy_addr;
12078 case SIOCGMIIREG: {
12081 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12082 break; /* We have no PHY */
12084 if (!netif_running(dev))
/* Read the requested PHY register (5-bit address space). */
12087 spin_lock_bh(&tp->lock);
12088 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12089 spin_unlock_bh(&tp->lock);
12091 data->val_out = mii_regval;
12097 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12098 break; /* We have no PHY */
12100 if (!netif_running(dev))
12103 spin_lock_bh(&tp->lock);
12104 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12105 spin_unlock_bh(&tp->lock);
12113 return -EOPNOTSUPP;
/* ethtool .get_coalesce: report the cached coalescing parameters by
 * copying tp->coal into the caller's structure.
 */
12116 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12118 struct tg3 *tp = netdev_priv(dev);
12120 memcpy(ec, &tp->coal, sizeof(*ec));
/* ethtool .set_coalesce: validate the requested coalescing parameters
 * against hardware maxima, store the supported subset in tp->coal and,
 * if the device is running, program them into the chip.
 *
 * On 5705+ parts the per-irq tick and stats-block limits stay zero, so
 * those fields must be zero for the request to be accepted.
 */
12124 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12126 struct tg3 *tp = netdev_priv(dev);
12127 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12128 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12130 if (!tg3_flag(tp, 5705_PLUS)) {
12131 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12132 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12133 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12134 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
/* Range-check every parameter we honor. */
12137 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12138 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12139 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12140 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12141 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12142 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12143 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12144 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12145 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12146 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12149 /* No rx interrupts will be generated if both are zero */
12150 if ((ec->rx_coalesce_usecs == 0) &&
12151 (ec->rx_max_coalesced_frames == 0))
12154 /* No tx interrupts will be generated if both are zero */
12155 if ((ec->tx_coalesce_usecs == 0) &&
12156 (ec->tx_max_coalesced_frames == 0))
12159 /* Only copy relevant parameters, ignore all others. */
12160 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12161 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12162 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12163 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12164 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12165 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12166 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12167 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12168 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
/* Push new values to the hardware if the interface is up. */
12170 if (netif_running(dev)) {
12171 tg3_full_lock(tp, 0);
12172 __tg3_set_coalesce(tp, &tp->coal);
12173 tg3_full_unlock(tp);
/* ethtool operations table wired up at probe time. */
12178 static const struct ethtool_ops tg3_ethtool_ops = {
12179 .get_settings = tg3_get_settings,
12180 .set_settings = tg3_set_settings,
12181 .get_drvinfo = tg3_get_drvinfo,
12182 .get_regs_len = tg3_get_regs_len,
12183 .get_regs = tg3_get_regs,
12184 .get_wol = tg3_get_wol,
12185 .set_wol = tg3_set_wol,
12186 .get_msglevel = tg3_get_msglevel,
12187 .set_msglevel = tg3_set_msglevel,
12188 .nway_reset = tg3_nway_reset,
12189 .get_link = ethtool_op_get_link,
12190 .get_eeprom_len = tg3_get_eeprom_len,
12191 .get_eeprom = tg3_get_eeprom,
12192 .set_eeprom = tg3_set_eeprom,
12193 .get_ringparam = tg3_get_ringparam,
12194 .set_ringparam = tg3_set_ringparam,
12195 .get_pauseparam = tg3_get_pauseparam,
12196 .set_pauseparam = tg3_set_pauseparam,
12197 .self_test = tg3_self_test,
12198 .get_strings = tg3_get_strings,
12199 .set_phys_id = tg3_set_phys_id,
12200 .get_ethtool_stats = tg3_get_ethtool_stats,
12201 .get_coalesce = tg3_get_coalesce,
12202 .set_coalesce = tg3_set_coalesce,
12203 .get_sset_count = tg3_get_sset_count,
12204 .get_rxnfc = tg3_get_rxnfc,
12205 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12206 .get_rxfh_indir = tg3_get_rxfh_indir,
12207 .set_rxfh_indir = tg3_set_rxfh_indir,
/* .ndo_get_stats64: fill 'stats' from the live counters under tp->lock;
 * falls back to the last saved snapshot (net_stats_prev) when live
 * statistics are unavailable.
 */
12210 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12211 struct rtnl_link_stats64 *stats)
12213 struct tg3 *tp = netdev_priv(dev);
12216 return &tp->net_stats_prev;
12218 spin_lock_bh(&tp->lock);
12219 tg3_get_nstats(tp, stats);
12220 spin_unlock_bh(&tp->lock);
/* .ndo_set_rx_mode: apply promiscuous/multicast filtering changes.
 * No-op while the interface is down; the real work happens in
 * __tg3_set_rx_mode() under the full lock.
 */
12225 static void tg3_set_rx_mode(struct net_device *dev)
12227 struct tg3 *tp = netdev_priv(dev);
12229 if (!netif_running(dev))
12232 tg3_full_lock(tp, 0);
12233 __tg3_set_rx_mode(dev);
12234 tg3_full_unlock(tp);
/* Record a new MTU and toggle jumbo-frame state accordingly.
 * On 5780-class chips jumbo frames and TSO are mutually exclusive, so
 * TSO_CAPABLE is cleared when going above ETH_DATA_LEN and restored when
 * coming back, with netdev_update_features() re-negotiating offloads.
 * Caller is responsible for any needed hardware restart.
 */
12237 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12240 dev->mtu = new_mtu;
12242 if (new_mtu > ETH_DATA_LEN) {
12243 if (tg3_flag(tp, 5780_CLASS)) {
12244 netdev_update_features(dev);
12245 tg3_flag_clear(tp, TSO_CAPABLE);
12247 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12250 if (tg3_flag(tp, 5780_CLASS)) {
12251 tg3_flag_set(tp, TSO_CAPABLE);
12252 netdev_update_features(dev);
12254 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
/* .ndo_change_mtu: validate the requested MTU against chip limits and
 * apply it.  If the interface is down only the soft state is updated
 * (hardware picks it up on next open); otherwise the chip is halted,
 * reconfigured via tg3_set_mtu() and restarted.
 */
12258 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12260 struct tg3 *tp = netdev_priv(dev);
12263 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12266 if (!netif_running(dev)) {
12267 /* We'll just catch it later when the
12270 tg3_set_mtu(dev, tp, new_mtu);
12276 tg3_netif_stop(tp);
12278 tg3_full_lock(tp, 1);
12280 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12282 tg3_set_mtu(dev, tp, new_mtu);
12284 err = tg3_restart_hw(tp, 0);
12287 tg3_netif_start(tp);
12289 tg3_full_unlock(tp);
/* net_device operations table wired up at probe time. */
12297 static const struct net_device_ops tg3_netdev_ops = {
12298 .ndo_open = tg3_open,
12299 .ndo_stop = tg3_close,
12300 .ndo_start_xmit = tg3_start_xmit,
12301 .ndo_get_stats64 = tg3_get_stats64,
12302 .ndo_validate_addr = eth_validate_addr,
12303 .ndo_set_rx_mode = tg3_set_rx_mode,
12304 .ndo_set_mac_address = tg3_set_mac_addr,
12305 .ndo_do_ioctl = tg3_ioctl,
12306 .ndo_tx_timeout = tg3_tx_timeout,
12307 .ndo_change_mtu = tg3_change_mtu,
12308 .ndo_fix_features = tg3_fix_features,
12309 .ndo_set_features = tg3_set_features,
12310 #ifdef CONFIG_NET_POLL_CONTROLLER
12311 .ndo_poll_controller = tg3_poll_controller,
/* Probe the size of a legacy (non-NVRAM) EEPROM.
 * Starts from the default EEPROM_CHIP_SIZE and, when a recognized magic
 * signature is present at offset 0, reads at doubling offsets until the
 * address space wraps back to the signature, which reveals the true size.
 */
12315 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12317 u32 cursize, val, magic;
12319 tp->nvram_size = EEPROM_CHIP_SIZE;
12321 if (tg3_nvram_read(tp, 0, &magic) != 0)
12324 if ((magic != TG3_EEPROM_MAGIC) &&
12325 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12326 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12330 * Size the chip by reading offsets at increasing powers of two.
12331 * When we encounter our validation signature, we know the addressing
12332 * has wrapped around, and thus have our chip size.
12336 while (cursize < tp->nvram_size) {
12337 if (tg3_nvram_read(tp, cursize, &val) != 0)
12346 tp->nvram_size = cursize;
/* Determine NVRAM size.  Firmware-formatted images carry the size in a
 * 16-bit field at offset 0xf2; self-boot images (no TG3_EEPROM_MAGIC)
 * fall back to EEPROM probing; otherwise a 512KB default is assumed.
 */
12349 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12353 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12356 /* Selfboot format */
12357 if (val != TG3_EEPROM_MAGIC) {
12358 tg3_get_eeprom_size(tp);
12362 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12364 /* This is confusing. We want to operate on the
12365 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12366 * call will read from NVRAM and byteswap the data
12367 * according to the byteswapping settings for all
12368 * other register accesses. This ensures the data we
12369 * want will always reside in the lower 16-bits.
12370 * However, the data in NVRAM is in LE format, which
12371 * means the data from the NVRAM read will always be
12372 * opposite the endianness of the CPU. The 16-bit
12373 * byteswap then brings the data to CPU endianness.
12375 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12379 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Decode NVRAM_CFG1 for pre-5752 chips (5750 / 5780-class): identify
 * the flash vendor, set the JEDEC id, page size and the NVRAM_BUFFERED
 * flag accordingly.  Chips without a recognized flash interface default
 * to a buffered Atmel AT45DB0X1B configuration.
 */
12382 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12386 nvcfg1 = tr32(NVRAM_CFG1);
12387 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12388 tg3_flag_set(tp, FLASH);
/* No flash interface: force non-bypass (EEPROM) access mode. */
12390 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12391 tw32(NVRAM_CFG1, nvcfg1);
12394 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12395 tg3_flag(tp, 5780_CLASS)) {
12396 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12397 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12398 tp->nvram_jedecnum = JEDEC_ATMEL;
12399 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12400 tg3_flag_set(tp, NVRAM_BUFFERED);
12402 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12403 tp->nvram_jedecnum = JEDEC_ATMEL;
12404 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12406 case FLASH_VENDOR_ATMEL_EEPROM:
12407 tp->nvram_jedecnum = JEDEC_ATMEL;
12408 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12409 tg3_flag_set(tp, NVRAM_BUFFERED);
12411 case FLASH_VENDOR_ST:
12412 tp->nvram_jedecnum = JEDEC_ST;
12413 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12414 tg3_flag_set(tp, NVRAM_BUFFERED);
12416 case FLASH_VENDOR_SAIFUN:
12417 tp->nvram_jedecnum = JEDEC_SAIFUN;
12418 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12420 case FLASH_VENDOR_SST_SMALL:
12421 case FLASH_VENDOR_SST_LARGE:
12422 tp->nvram_jedecnum = JEDEC_SST;
12423 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12427 tp->nvram_jedecnum = JEDEC_ATMEL;
12428 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12429 tg3_flag_set(tp, NVRAM_BUFFERED);
/* Translate the NVRAM_CFG1 5752-style page-size field into a byte count
 * stored in tp->nvram_pagesize.  264/528 are the Atmel DataFlash
 * "power-of-two plus spare" page sizes.
 */
12433 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12435 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12436 case FLASH_5752PAGE_SIZE_256:
12437 tp->nvram_pagesize = 256;
12439 case FLASH_5752PAGE_SIZE_512:
12440 tp->nvram_pagesize = 512;
12442 case FLASH_5752PAGE_SIZE_1K:
12443 tp->nvram_pagesize = 1024;
12445 case FLASH_5752PAGE_SIZE_2K:
12446 tp->nvram_pagesize = 2048;
12448 case FLASH_5752PAGE_SIZE_4K:
12449 tp->nvram_pagesize = 4096;
12451 case FLASH_5752PAGE_SIZE_264:
12452 tp->nvram_pagesize = 264;
12454 case FLASH_5752PAGE_SIZE_528:
12455 tp->nvram_pagesize = 528;
/* Decode NVRAM_CFG1 on 5752 parts: detect TPM write protection (bit 27),
 * identify the flash/EEPROM vendor, then derive the page size (flash via
 * tg3_nvram_get_pagesize(), EEPROM via the maximum chip size).
 */
12460 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12464 nvcfg1 = tr32(NVRAM_CFG1);
12466 /* NVRAM protection for TPM */
12467 if (nvcfg1 & (1 << 27))
12468 tg3_flag_set(tp, PROTECTED_NVRAM);
12470 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12471 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12472 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12473 tp->nvram_jedecnum = JEDEC_ATMEL;
12474 tg3_flag_set(tp, NVRAM_BUFFERED);
12476 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12477 tp->nvram_jedecnum = JEDEC_ATMEL;
12478 tg3_flag_set(tp, NVRAM_BUFFERED);
12479 tg3_flag_set(tp, FLASH);
12481 case FLASH_5752VENDOR_ST_M45PE10:
12482 case FLASH_5752VENDOR_ST_M45PE20:
12483 case FLASH_5752VENDOR_ST_M45PE40:
12484 tp->nvram_jedecnum = JEDEC_ST;
12485 tg3_flag_set(tp, NVRAM_BUFFERED);
12486 tg3_flag_set(tp, FLASH);
12490 if (tg3_flag(tp, FLASH)) {
12491 tg3_nvram_get_pagesize(tp, nvcfg1);
12493 /* For eeprom, set pagesize to maximum eeprom size */
12494 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* EEPROM path: disable compat bypass so accesses go through the FSM. */
12496 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12497 tw32(NVRAM_CFG1, nvcfg1);
/* Decode NVRAM_CFG1 on 5755 parts.  Besides vendor/page-size detection,
 * this also computes tp->nvram_size directly from the vendor id, with
 * reduced usable sizes when TPM write protection is active ('protect').
 */
12503 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12505 u32 nvcfg1, protect = 0;
12507 nvcfg1 = tr32(NVRAM_CFG1);
12509 /* NVRAM protection for TPM */
12510 if (nvcfg1 & (1 << 27)) {
12511 tg3_flag_set(tp, PROTECTED_NVRAM);
12513 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12515 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12516 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12517 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12518 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12519 tp->nvram_jedecnum = JEDEC_ATMEL;
12520 tg3_flag_set(tp, NVRAM_BUFFERED);
12521 tg3_flag_set(tp, FLASH);
12522 tp->nvram_pagesize = 264;
12523 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12524 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12525 tp->nvram_size = (protect ? 0x3e200 :
12526 TG3_NVRAM_SIZE_512KB);
12527 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12528 tp->nvram_size = (protect ? 0x1f200 :
12529 TG3_NVRAM_SIZE_256KB);
12531 tp->nvram_size = (protect ? 0x1f200 :
12532 TG3_NVRAM_SIZE_128KB);
12534 case FLASH_5752VENDOR_ST_M45PE10:
12535 case FLASH_5752VENDOR_ST_M45PE20:
12536 case FLASH_5752VENDOR_ST_M45PE40:
12537 tp->nvram_jedecnum = JEDEC_ST;
12538 tg3_flag_set(tp, NVRAM_BUFFERED);
12539 tg3_flag_set(tp, FLASH);
12540 tp->nvram_pagesize = 256;
12541 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12542 tp->nvram_size = (protect ?
12543 TG3_NVRAM_SIZE_64KB :
12544 TG3_NVRAM_SIZE_128KB);
12545 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12546 tp->nvram_size = (protect ?
12547 TG3_NVRAM_SIZE_64KB :
12548 TG3_NVRAM_SIZE_256KB);
12550 tp->nvram_size = (protect ?
12551 TG3_NVRAM_SIZE_128KB :
12552 TG3_NVRAM_SIZE_512KB);
/* Decode NVRAM_CFG1 on 5787/5784/5785 parts: classify the device as an
 * Atmel/Micro EEPROM, Atmel DataFlash (264-byte pages) or ST flash
 * (256-byte pages) and set the JEDEC id and flags to match.
 */
12557 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12561 nvcfg1 = tr32(NVRAM_CFG1);
12563 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12564 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12565 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12566 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12567 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12568 tp->nvram_jedecnum = JEDEC_ATMEL;
12569 tg3_flag_set(tp, NVRAM_BUFFERED);
12570 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* EEPROM path: disable compat bypass so accesses go through the FSM. */
12572 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12573 tw32(NVRAM_CFG1, nvcfg1);
12575 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12576 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12577 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12578 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12579 tp->nvram_jedecnum = JEDEC_ATMEL;
12580 tg3_flag_set(tp, NVRAM_BUFFERED);
12581 tg3_flag_set(tp, FLASH);
12582 tp->nvram_pagesize = 264;
12584 case FLASH_5752VENDOR_ST_M45PE10:
12585 case FLASH_5752VENDOR_ST_M45PE20:
12586 case FLASH_5752VENDOR_ST_M45PE40:
12587 tp->nvram_jedecnum = JEDEC_ST;
12588 tg3_flag_set(tp, NVRAM_BUFFERED);
12589 tg3_flag_set(tp, FLASH);
12590 tp->nvram_pagesize = 256;
/* Decode NVRAM_CFG1 on 5761 parts.  Detects TPM protection, the flash
 * vendor (Atmel vs. ST, 256-byte pages) and then maps the vendor id to
 * an explicit device size (256KB..2MB).  The second switch refines
 * tp->nvram_size after an initial read of NVRAM_ADDR_LOCKOUT.
 */
12597 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12599 u32 nvcfg1, protect = 0;
12601 nvcfg1 = tr32(NVRAM_CFG1);
12603 /* NVRAM protection for TPM */
12604 if (nvcfg1 & (1 << 27)) {
12605 tg3_flag_set(tp, PROTECTED_NVRAM);
12607 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12609 case FLASH_5761VENDOR_ATMEL_ADB021D:
12610 case FLASH_5761VENDOR_ATMEL_ADB041D:
12611 case FLASH_5761VENDOR_ATMEL_ADB081D:
12612 case FLASH_5761VENDOR_ATMEL_ADB161D:
12613 case FLASH_5761VENDOR_ATMEL_MDB021D:
12614 case FLASH_5761VENDOR_ATMEL_MDB041D:
12615 case FLASH_5761VENDOR_ATMEL_MDB081D:
12616 case FLASH_5761VENDOR_ATMEL_MDB161D:
12617 tp->nvram_jedecnum = JEDEC_ATMEL;
12618 tg3_flag_set(tp, NVRAM_BUFFERED);
12619 tg3_flag_set(tp, FLASH);
12620 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12621 tp->nvram_pagesize = 256;
12623 case FLASH_5761VENDOR_ST_A_M45PE20:
12624 case FLASH_5761VENDOR_ST_A_M45PE40:
12625 case FLASH_5761VENDOR_ST_A_M45PE80:
12626 case FLASH_5761VENDOR_ST_A_M45PE16:
12627 case FLASH_5761VENDOR_ST_M_M45PE20:
12628 case FLASH_5761VENDOR_ST_M_M45PE40:
12629 case FLASH_5761VENDOR_ST_M_M45PE80:
12630 case FLASH_5761VENDOR_ST_M_M45PE16:
12631 tp->nvram_jedecnum = JEDEC_ST;
12632 tg3_flag_set(tp, NVRAM_BUFFERED);
12633 tg3_flag_set(tp, FLASH);
12634 tp->nvram_pagesize = 256;
12639 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12642 case FLASH_5761VENDOR_ATMEL_ADB161D:
12643 case FLASH_5761VENDOR_ATMEL_MDB161D:
12644 case FLASH_5761VENDOR_ST_A_M45PE16:
12645 case FLASH_5761VENDOR_ST_M_M45PE16:
12646 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12648 case FLASH_5761VENDOR_ATMEL_ADB081D:
12649 case FLASH_5761VENDOR_ATMEL_MDB081D:
12650 case FLASH_5761VENDOR_ST_A_M45PE80:
12651 case FLASH_5761VENDOR_ST_M_M45PE80:
12652 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12654 case FLASH_5761VENDOR_ATMEL_ADB041D:
12655 case FLASH_5761VENDOR_ATMEL_MDB041D:
12656 case FLASH_5761VENDOR_ST_A_M45PE40:
12657 case FLASH_5761VENDOR_ST_M_M45PE40:
12658 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12660 case FLASH_5761VENDOR_ATMEL_ADB021D:
12661 case FLASH_5761VENDOR_ATMEL_MDB021D:
12662 case FLASH_5761VENDOR_ST_A_M45PE20:
12663 case FLASH_5761VENDOR_ST_M_M45PE20:
12664 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
/* 5906 parts always use a buffered Atmel AT24C512-style EEPROM; no
 * NVRAM_CFG1 probing is needed.
 */
12670 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12672 tp->nvram_jedecnum = JEDEC_ATMEL;
12673 tg3_flag_set(tp, NVRAM_BUFFERED);
12674 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* Decode NVRAM_CFG1 on 57780 (and 57765-class) parts: EEPROM, Atmel
 * AT45DB flash or ST M45PE flash.  Nested switches map the exact vendor
 * id to a device size; unknown ids mark the chip as having no NVRAM.
 * For flash devices the page size is read back and, unless it is an
 * Atmel 264/528-byte page part, address translation is disabled.
 */
12677 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12681 nvcfg1 = tr32(NVRAM_CFG1);
12683 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12684 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12685 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12686 tp->nvram_jedecnum = JEDEC_ATMEL;
12687 tg3_flag_set(tp, NVRAM_BUFFERED);
12688 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* EEPROM path: disable compat bypass so accesses go through the FSM. */
12690 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12691 tw32(NVRAM_CFG1, nvcfg1);
12693 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12694 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12695 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12696 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12697 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12698 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12699 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12700 tp->nvram_jedecnum = JEDEC_ATMEL;
12701 tg3_flag_set(tp, NVRAM_BUFFERED);
12702 tg3_flag_set(tp, FLASH);
12704 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12705 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12706 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12707 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12708 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12710 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12711 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12712 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12714 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12715 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12716 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12720 case FLASH_5752VENDOR_ST_M45PE10:
12721 case FLASH_5752VENDOR_ST_M45PE20:
12722 case FLASH_5752VENDOR_ST_M45PE40:
12723 tp->nvram_jedecnum = JEDEC_ST;
12724 tg3_flag_set(tp, NVRAM_BUFFERED);
12725 tg3_flag_set(tp, FLASH);
12727 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12728 case FLASH_5752VENDOR_ST_M45PE10:
12729 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12731 case FLASH_5752VENDOR_ST_M45PE20:
12732 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12734 case FLASH_5752VENDOR_ST_M45PE40:
12735 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Unknown vendor id: treat the chip as having no NVRAM at all. */
12740 tg3_flag_set(tp, NO_NVRAM);
12744 tg3_nvram_get_pagesize(tp, nvcfg1);
12745 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12746 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* Decode NVRAM_CFG1 on 5717/5719 parts: EEPROM, Atmel flash or ST
 * M25PE/M45PE flash.  Nested switches map the vendor id to a device
 * size (some ids leave the size to be auto-detected later); unknown ids
 * mark the chip as having no NVRAM.  Non-264/528-byte-page flash parts
 * get address translation disabled.
 */
12750 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12754 nvcfg1 = tr32(NVRAM_CFG1);
12756 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12757 case FLASH_5717VENDOR_ATMEL_EEPROM:
12758 case FLASH_5717VENDOR_MICRO_EEPROM:
12759 tp->nvram_jedecnum = JEDEC_ATMEL;
12760 tg3_flag_set(tp, NVRAM_BUFFERED);
12761 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* EEPROM path: disable compat bypass so accesses go through the FSM. */
12763 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12764 tw32(NVRAM_CFG1, nvcfg1);
12766 case FLASH_5717VENDOR_ATMEL_MDB011D:
12767 case FLASH_5717VENDOR_ATMEL_ADB011B:
12768 case FLASH_5717VENDOR_ATMEL_ADB011D:
12769 case FLASH_5717VENDOR_ATMEL_MDB021D:
12770 case FLASH_5717VENDOR_ATMEL_ADB021B:
12771 case FLASH_5717VENDOR_ATMEL_ADB021D:
12772 case FLASH_5717VENDOR_ATMEL_45USPT:
12773 tp->nvram_jedecnum = JEDEC_ATMEL;
12774 tg3_flag_set(tp, NVRAM_BUFFERED);
12775 tg3_flag_set(tp, FLASH);
12777 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12778 case FLASH_5717VENDOR_ATMEL_MDB021D:
12779 /* Detect size with tg3_nvram_get_size() */
12781 case FLASH_5717VENDOR_ATMEL_ADB021B:
12782 case FLASH_5717VENDOR_ATMEL_ADB021D:
12783 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12786 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12790 case FLASH_5717VENDOR_ST_M_M25PE10:
12791 case FLASH_5717VENDOR_ST_A_M25PE10:
12792 case FLASH_5717VENDOR_ST_M_M45PE10:
12793 case FLASH_5717VENDOR_ST_A_M45PE10:
12794 case FLASH_5717VENDOR_ST_M_M25PE20:
12795 case FLASH_5717VENDOR_ST_A_M25PE20:
12796 case FLASH_5717VENDOR_ST_M_M45PE20:
12797 case FLASH_5717VENDOR_ST_A_M45PE20:
12798 case FLASH_5717VENDOR_ST_25USPT:
12799 case FLASH_5717VENDOR_ST_45USPT:
12800 tp->nvram_jedecnum = JEDEC_ST;
12801 tg3_flag_set(tp, NVRAM_BUFFERED);
12802 tg3_flag_set(tp, FLASH);
12804 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12805 case FLASH_5717VENDOR_ST_M_M25PE20:
12806 case FLASH_5717VENDOR_ST_M_M45PE20:
12807 /* Detect size with tg3_nvram_get_size() */
12809 case FLASH_5717VENDOR_ST_A_M25PE20:
12810 case FLASH_5717VENDOR_ST_A_M45PE20:
12811 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12814 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
/* Unknown vendor id: treat the chip as having no NVRAM at all. */
12819 tg3_flag_set(tp, NO_NVRAM);
12823 tg3_nvram_get_pagesize(tp, nvcfg1);
12824 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12825 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
/* Decode NVRAM_CFG1 on 5720 parts.  The pin-strap field (nvmpinstrp)
 * selects between an Atmel EEPROM (HD = AT24C512-sized, LD = AT24C02-
 * sized), Atmel DataFlash or ST M25PE/M45PE flash; nested switches map
 * the exact strap value to a device size (128KB..1MB).  Unknown straps
 * mark the chip as having no NVRAM; non-264/528-byte-page flash parts
 * get address translation disabled.
 */
12828 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12830 u32 nvcfg1, nvmpinstrp;
12832 nvcfg1 = tr32(NVRAM_CFG1);
12833 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12835 switch (nvmpinstrp) {
12836 case FLASH_5720_EEPROM_HD:
12837 case FLASH_5720_EEPROM_LD:
12838 tp->nvram_jedecnum = JEDEC_ATMEL;
12839 tg3_flag_set(tp, NVRAM_BUFFERED);
/* EEPROM path: disable compat bypass so accesses go through the FSM. */
12841 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12842 tw32(NVRAM_CFG1, nvcfg1);
12843 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12844 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12846 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12848 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12849 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12850 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12851 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12852 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12853 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12854 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12855 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12856 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12857 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12858 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12859 case FLASH_5720VENDOR_ATMEL_45USPT:
12860 tp->nvram_jedecnum = JEDEC_ATMEL;
12861 tg3_flag_set(tp, NVRAM_BUFFERED);
12862 tg3_flag_set(tp, FLASH);
12864 switch (nvmpinstrp) {
12865 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12866 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12867 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12868 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12870 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12871 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12872 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12873 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12875 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12876 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12877 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12880 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12884 case FLASH_5720VENDOR_M_ST_M25PE10:
12885 case FLASH_5720VENDOR_M_ST_M45PE10:
12886 case FLASH_5720VENDOR_A_ST_M25PE10:
12887 case FLASH_5720VENDOR_A_ST_M45PE10:
12888 case FLASH_5720VENDOR_M_ST_M25PE20:
12889 case FLASH_5720VENDOR_M_ST_M45PE20:
12890 case FLASH_5720VENDOR_A_ST_M25PE20:
12891 case FLASH_5720VENDOR_A_ST_M45PE20:
12892 case FLASH_5720VENDOR_M_ST_M25PE40:
12893 case FLASH_5720VENDOR_M_ST_M45PE40:
12894 case FLASH_5720VENDOR_A_ST_M25PE40:
12895 case FLASH_5720VENDOR_A_ST_M45PE40:
12896 case FLASH_5720VENDOR_M_ST_M25PE80:
12897 case FLASH_5720VENDOR_M_ST_M45PE80:
12898 case FLASH_5720VENDOR_A_ST_M25PE80:
12899 case FLASH_5720VENDOR_A_ST_M45PE80:
12900 case FLASH_5720VENDOR_ST_25USPT:
12901 case FLASH_5720VENDOR_ST_45USPT:
12902 tp->nvram_jedecnum = JEDEC_ST;
12903 tg3_flag_set(tp, NVRAM_BUFFERED);
12904 tg3_flag_set(tp, FLASH);
12906 switch (nvmpinstrp) {
12907 case FLASH_5720VENDOR_M_ST_M25PE20:
12908 case FLASH_5720VENDOR_M_ST_M45PE20:
12909 case FLASH_5720VENDOR_A_ST_M25PE20:
12910 case FLASH_5720VENDOR_A_ST_M45PE20:
12911 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12913 case FLASH_5720VENDOR_M_ST_M25PE40:
12914 case FLASH_5720VENDOR_M_ST_M45PE40:
12915 case FLASH_5720VENDOR_A_ST_M25PE40:
12916 case FLASH_5720VENDOR_A_ST_M45PE40:
12917 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12919 case FLASH_5720VENDOR_M_ST_M25PE80:
12920 case FLASH_5720VENDOR_M_ST_M45PE80:
12921 case FLASH_5720VENDOR_A_ST_M25PE80:
12922 case FLASH_5720VENDOR_A_ST_M45PE80:
12923 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12926 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
/* Unknown pin strap: treat the chip as having no NVRAM at all. */
12931 tg3_flag_set(tp, NO_NVRAM);
12935 tg3_nvram_get_pagesize(tp, nvcfg1);
12936 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12937 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12940 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Probe-time NVRAM initialization.
 *
 * Resets the serial-EEPROM state machine and enables seeprom access.
 * On 5700/5701 (no NVRAM block) only the legacy EEPROM size probe runs;
 * on everything else the NVRAM flag is set, the device-specific
 * tg3_get_*_nvram_info() decoder is dispatched by ASIC revision, and
 * the NVRAM size is auto-detected if the decoder left it at zero.
 * All NVRAM access happens between tg3_nvram_lock()/unlock() and
 * tg3_enable/disable_nvram_access().
 */
12941 static void __devinit tg3_nvram_init(struct tg3 *tp)
12943 tw32_f(GRC_EEPROM_ADDR,
12944 (EEPROM_ADDR_FSM_RESET |
12945 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12946 EEPROM_ADDR_CLKPERD_SHIFT)));
12950 /* Enable seeprom accesses. */
12951 tw32_f(GRC_LOCAL_CTRL,
12952 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12955 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12956 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12957 tg3_flag_set(tp, NVRAM);
12959 if (tg3_nvram_lock(tp)) {
12960 netdev_warn(tp->dev,
12961 "Cannot get nvram lock, %s failed\n",
12965 tg3_enable_nvram_access(tp);
12967 tp->nvram_size = 0;
/* Dispatch the per-ASIC NVRAM_CFG1 decoder. */
12969 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12970 tg3_get_5752_nvram_info(tp);
12971 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12972 tg3_get_5755_nvram_info(tp);
12973 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12974 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12975 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12976 tg3_get_5787_nvram_info(tp);
12977 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12978 tg3_get_5761_nvram_info(tp);
12979 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12980 tg3_get_5906_nvram_info(tp);
12981 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12982 tg3_flag(tp, 57765_CLASS))
12983 tg3_get_57780_nvram_info(tp);
12984 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12985 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
12986 tg3_get_5717_nvram_info(tp);
12987 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
12988 tg3_get_5720_nvram_info(tp);
12990 tg3_get_nvram_info(tp);
12992 if (tp->nvram_size == 0)
12993 tg3_get_nvram_size(tp);
12995 tg3_disable_nvram_access(tp);
12996 tg3_nvram_unlock(tp);
/* 5700/5701: no NVRAM block; fall back to the EEPROM size probe. */
12999 tg3_flag_clear(tp, NVRAM)
13000 tg3_flag_clear(tp, NVRAM_BUFFERED);
13002 tg3_get_eeprom_size(tp);
/* Maps a PCI subsystem (vendor, device) pair to a known PHY id; used as a
 * fallback when the EEPROM carries no valid signature.  NOTE(review): the
 * phy_id member and closing brace are outside this extraction. */
13006 struct subsys_tbl_ent {
13007 u16 subsys_vendor, subsys_devid;
/* Hard-coded subsystem-id -> PHY-id table, consulted by
 * tg3_lookup_by_subsys() when the NVRAM has no usable PHY id.
 * A phy_id of 0 marks boards whose PHY must be probed another way
 * (presumably serdes parts — confirm against tg3_phy_probe()). */
13011 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13012 /* Broadcom boards. */
13013 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13014 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13015 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13016 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13017 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13018 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13019 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13020 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13021 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13022 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13023 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13024 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13025 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13026 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13027 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13028 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13029 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13030 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13031 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13032 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13033 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13034 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
/* 3com boards. */
13037 { TG3PCI_SUBVENDOR_ID_3COM,
13038 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13039 { TG3PCI_SUBVENDOR_ID_3COM,
13040 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13041 { TG3PCI_SUBVENDOR_ID_3COM,
13042 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13043 { TG3PCI_SUBVENDOR_ID_3COM,
13044 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13045 { TG3PCI_SUBVENDOR_ID_3COM,
13046 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
/* Dell boards. */
13049 { TG3PCI_SUBVENDOR_ID_DELL,
13050 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13051 { TG3PCI_SUBVENDOR_ID_DELL,
13052 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13053 { TG3PCI_SUBVENDOR_ID_DELL,
13054 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13055 { TG3PCI_SUBVENDOR_ID_DELL,
13056 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13058 /* Compaq boards. */
13059 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13060 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13061 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13062 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13063 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13064 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13065 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13066 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13067 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13068 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
/* IBM boards. */
13071 { TG3PCI_SUBVENDOR_ID_IBM,
13072 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
/*
 * tg3_lookup_by_subsys - find this device's entry in subsys_id_to_phy_id[].
 *
 * Linear scan matching the PCI subsystem vendor and device ids of tp->pdev.
 * Returns a pointer to the matching table entry; the no-match return
 * (presumably NULL — confirm, the line is outside this extraction) is
 * handled by the caller in tg3_phy_probe().
 */
13075 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13079 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13080 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13081 tp->pdev->subsystem_vendor) &&
13082 (subsys_id_to_phy_id[i].subsys_devid ==
13083 tp->pdev->subsystem_device))
13084 return &subsys_id_to_phy_id[i];
/*
 * tg3_get_eeprom_hw_cfg - read board configuration from NIC SRAM/EEPROM.
 *
 * Establishes PHY id, LED mode, WOL capability/enable, write-protect,
 * ASF/APE firmware flags, serdes flags and ASPM/RGMII workarounds from the
 * NIC_SRAM_DATA_* shadow of the EEPROM.  Defaults are set first (onboard
 * device, WOL-capable, PHY_1 LED mode) and only overridden when the SRAM
 * signature magic is present.  Finally the device wakeup capability is
 * registered with the PM core.
 *
 * NOTE(review): extraction is missing interleaved lines (closing braces,
 * some else arms and switch statements); comments annotate visible code only.
 */
13089 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13093 tp->phy_id = TG3_PHY_ID_INVALID;
13094 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13096 /* Assume an onboard device and WOL capable by default. */
13097 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13098 tg3_flag_set(tp, WOL_CAP);
/* 5906 keeps its config in the VCPU shadow registers, not NIC SRAM. */
13100 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13101 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13102 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13103 tg3_flag_set(tp, IS_NIC);
13105 val = tr32(VCPU_CFGSHDW);
13106 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13107 tg3_flag_set(tp, ASPM_WORKAROUND);
13108 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13109 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13110 tg3_flag_set(tp, WOL_ENABLE);
13111 device_set_wakeup_enable(&tp->pdev->dev, true);
/* General path: only trust SRAM contents if the signature matches. */
13116 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13117 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13118 u32 nic_cfg, led_cfg;
13119 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13120 int eeprom_phy_serdes = 0;
13122 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13123 tp->nic_sram_data_cfg = nic_cfg;
13125 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13126 ver >>= NIC_SRAM_DATA_VER_SHIFT;
/* CFG_2 only exists on newer bootcode (ver in (0, 0x100)). */
13127 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13128 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13129 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13130 (ver > 0) && (ver < 0x100))
13131 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13134 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13136 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13137 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13138 eeprom_phy_serdes = 1;
/* Reassemble the PHY id from its split SRAM encoding. */
13140 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13141 if (nic_phy_id != 0) {
13142 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13143 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13145 eeprom_phy_id = (id1 >> 16) << 10;
13146 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13147 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13151 tp->phy_id = eeprom_phy_id;
13152 if (eeprom_phy_serdes) {
13153 if (!tg3_flag(tp, 5705_PLUS))
13154 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13156 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
/* LED mode: 5750+ keeps extra (Shasta) modes in CFG_2. */
13159 if (tg3_flag(tp, 5750_PLUS))
13160 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13161 SHASTA_EXT_LED_MODE_MASK);
13163 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13167 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13168 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13171 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13172 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13175 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13176 tp->led_ctrl = LED_CTRL_MODE_MAC;
13178 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13179 * read on some older 5700/5701 bootcode.
13181 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13183 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13185 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13189 case SHASTA_EXT_LED_SHARED:
13190 tp->led_ctrl = LED_CTRL_MODE_SHARED;
/* Early 5750 A0/A1 need PHY LED bits forced on in shared mode. */
13191 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13192 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13193 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13194 LED_CTRL_MODE_PHY_2);
13197 case SHASTA_EXT_LED_MAC:
13198 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13201 case SHASTA_EXT_LED_COMBO:
13202 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13203 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13204 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13205 LED_CTRL_MODE_PHY_2);
/* Dell 5700/5701 boards wire the LEDs for PHY_2 mode. */
13210 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13211 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13212 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13213 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13215 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13216 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13218 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13219 tg3_flag_set(tp, EEPROM_WRITE_PROT);
/* Specific Arima boards report WP but must stay writable. */
13220 if ((tp->pdev->subsystem_vendor ==
13221 PCI_VENDOR_ID_ARIMA) &&
13222 (tp->pdev->subsystem_device == 0x205a ||
13223 tp->pdev->subsystem_device == 0x2063))
13224 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13226 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13227 tg3_flag_set(tp, IS_NIC);
13230 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13231 tg3_flag_set(tp, ENABLE_ASF);
13232 if (tg3_flag(tp, 5750_PLUS))
13233 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13236 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13237 tg3_flag(tp, 5750_PLUS))
13238 tg3_flag_set(tp, ENABLE_APE);
/* Fiber/serdes boards without fiber-WOL support lose WOL capability. */
13240 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13241 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13242 tg3_flag_clear(tp, WOL_CAP);
13244 if (tg3_flag(tp, WOL_CAP) &&
13245 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13246 tg3_flag_set(tp, WOL_ENABLE);
13247 device_set_wakeup_enable(&tp->pdev->dev, true);
13250 if (cfg2 & (1 << 17))
13251 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13253 /* serdes signal pre-emphasis in register 0x590 set by */
13254 /* bootcode if bit 18 is set */
13255 if (cfg2 & (1 << 18))
13256 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13258 if ((tg3_flag(tp, 57765_PLUS) ||
13259 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13260 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13261 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13262 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
/* ASPM debounce workaround for PCIe parts (except 5785/57765+). */
13264 if (tg3_flag(tp, PCI_EXPRESS) &&
13265 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13266 !tg3_flag(tp, 57765_PLUS)) {
13269 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13270 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13271 tg3_flag_set(tp, ASPM_WORKAROUND);
13274 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13275 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13276 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13277 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13278 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13279 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
/* Tell the PM core what we concluded about wakeup support. */
13282 if (tg3_flag(tp, WOL_CAP))
13283 device_set_wakeup_enable(&tp->pdev->dev,
13284 tg3_flag(tp, WOL_ENABLE));
13286 device_set_wakeup_capable(&tp->pdev->dev, false);
/*
 * tg3_issue_otp_command - issue one command to the OTP controller.
 * @cmd: OTP_CTRL command bits to program.
 *
 * Writes the command with the START bit, then without it, and polls
 * OTP_STATUS up to 100 times (~1 ms total) for CMD_DONE.
 * Returns 0 on completion, -EBUSY on timeout.
 */
13289 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13294 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13295 tw32(OTP_CTRL, cmd);
13297 /* Wait for up to 1 ms for command to execute. */
13298 for (i = 0; i < 100; i++) {
13299 val = tr32(OTP_STATUS);
13300 if (val & OTP_STATUS_CMD_DONE)
13305 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13308 /* Read the gphy configuration from the OTP region of the chip. The gphy
13309 * configuration is a 32-bit value that straddles the alignment boundary.
13310 * We do two 32-bit reads and then shift and merge the results.
13312 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13314 u32 bhalf_otp, thalf_otp;
/* Route OTP accesses through the GRC register window. */
13316 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
/* Each failed OTP command aborts the read (return path not visible here). */
13318 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13321 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13323 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13326 thalf_otp = tr32(OTP_READ_DATA);
13328 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13330 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13333 bhalf_otp = tr32(OTP_READ_DATA);
/* Merge: low 16 bits of first word become the high half of the result. */
13335 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
/*
 * tg3_phy_init_link_config - set the default autoneg advertisement.
 *
 * Starts from ADVERTISED_Autoneg, adds gigabit modes unless the PHY is
 * 10/100-only, adds 10/100 modes for copper PHYs (non-serdes), and FIBRE
 * otherwise.  Speed/duplex are left UNKNOWN with autoneg enabled.
 */
13338 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13340 u32 adv = ADVERTISED_Autoneg;
13342 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13343 adv |= ADVERTISED_1000baseT_Half |
13344 ADVERTISED_1000baseT_Full;
13346 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13347 adv |= ADVERTISED_100baseT_Half |
13348 ADVERTISED_100baseT_Full |
13349 ADVERTISED_10baseT_Half |
13350 ADVERTISED_10baseT_Full |
13353 adv |= ADVERTISED_FIBRE;
13355 tp->link_config.advertising = adv;
13356 tp->link_config.speed = SPEED_UNKNOWN;
13357 tp->link_config.duplex = DUPLEX_UNKNOWN;
13358 tp->link_config.autoneg = AUTONEG_ENABLE;
13359 tp->link_config.active_speed = SPEED_UNKNOWN;
13360 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
/*
 * tg3_phy_probe - identify and initialize the attached PHY.
 *
 * Determines the PHY id via (in order of preference) the MII PHYSID
 * registers, the value already read from the EEPROM in
 * tg3_get_eeprom_hw_cfg(), or the hard-coded subsystem table.  Sets
 * serdes/EEE flags, fills the default link config, and for copper PHYs
 * without firmware management resets the PHY and (re)starts autoneg.
 * Returns 0 or a negative errno from the PHY helpers.
 *
 * NOTE(review): several lines (braces, else arms, skip_phy_reset label)
 * are missing from this extraction.
 */
13365 static int __devinit tg3_phy_probe(struct tg3 *tp)
13367 u32 hw_phy_id_1, hw_phy_id_2;
13368 u32 hw_phy_id, hw_phy_id_masked;
13371 /* flow control autonegotiation is default behavior */
13372 tg3_flag_set(tp, PAUSE_AUTONEG);
13373 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13375 if (tg3_flag(tp, USE_PHYLIB))
13376 return tg3_phy_init(tp)/* phylib handles everything below */;
13378 /* Reading the PHY ID register can conflict with ASF
13379 * firmware access to the PHY hardware.
13382 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13383 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13385 /* Now read the physical PHY_ID from the chip and verify
13386 * that it is sane. If it doesn't look good, we fall back
13387 * to either the hard-coded table based PHY_ID and failing
13388 * that the value found in the eeprom area.
13390 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13391 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
/* Same split encoding as the NIC SRAM PHY id in tg3_get_eeprom_hw_cfg(). */
13393 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13394 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13395 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13397 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13400 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13401 tp->phy_id = hw_phy_id;
13402 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13403 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13405 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13407 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13408 /* Do nothing, phy ID already set up in
13409 * tg3_get_eeprom_hw_cfg().
13412 struct subsys_tbl_ent *p;
13414 /* No eeprom signature? Try the hardcoded
13415 * subsys device table.
13417 p = tg3_lookup_by_subsys(tp);
13421 tp->phy_id = p->phy_id;
13423 tp->phy_id == TG3_PHY_ID_BCM8002)
13424 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
/* EEE capability is limited to these copper-capable chips/revs. */
13428 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13429 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13430 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13431 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13432 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13433 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13434 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13435 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13437 tg3_phy_init_link_config(tp);
/* Only touch the PHY if no firmware (ASF/APE) is managing it. */
13439 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13440 !tg3_flag(tp, ENABLE_APE) &&
13441 !tg3_flag(tp, ENABLE_ASF)) {
/* BMSR is read twice: link status is latched-low per IEEE 802.3. */
13444 tg3_readphy(tp, MII_BMSR, &bmsr);
13445 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13446 (bmsr & BMSR_LSTATUS))
13447 goto skip_phy_reset;
13449 err = tg3_phy_reset(tp);
13453 tg3_phy_set_wirespeed(tp);
13455 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13456 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13457 tp->link_config.flowctrl);
13459 tg3_writephy(tp, MII_BMCR,
13460 BMCR_ANENABLE | BMCR_ANRESTART);
13465 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13466 err = tg3_init_5401phy_dsp(tp);
13470 err = tg3_init_5401phy_dsp(tp);
/*
 * tg3_read_vpd - extract board part number and firmware version from VPD.
 *
 * Reads the PCI VPD block, locates the read-only section, and pulls the
 * Dell-specific firmware version (MFR_ID "1028" + VENDOR0 keyword) into
 * tp->fw_ver and the PARTNO keyword into tp->board_part_number.  If VPD
 * is absent/invalid, falls back to a part-number string derived from the
 * PCI device id, or "none".
 *
 * NOTE(review): this extraction drops several lines (frees, gotos, labels,
 * else branches); comments annotate visible statements only.
 */
13476 static void __devinit tg3_read_vpd(struct tg3 *tp)
13479 unsigned int block_end, rosize, len;
13483 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13487 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13489 goto out_not_found;
13491 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13492 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13493 i += PCI_VPD_LRDT_TAG_SIZE;
/* RO section must fit inside the data actually read. */
13495 if (block_end > vpdlen)
13496 goto out_not_found;
/* Dell firmware version: only when MFR_ID is the literal "1028". */
13498 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13499 PCI_VPD_RO_KEYWORD_MFR_ID);
13501 len = pci_vpd_info_field_size(&vpd_data[j]);
13503 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13504 if (j + len > block_end || len != 4 ||
13505 memcmp(&vpd_data[j], "1028", 4))
13508 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13509 PCI_VPD_RO_KEYWORD_VENDOR0);
13513 len = pci_vpd_info_field_size(&vpd_data[j]);
13515 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13516 if (j + len > block_end)
13519 memcpy(tp->fw_ver, &vpd_data[j], len);
13520 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
/* Board part number from the PARTNO keyword. */
13524 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13525 PCI_VPD_RO_KEYWORD_PARTNO);
13527 goto out_not_found;
13529 len = pci_vpd_info_field_size(&vpd_data[i]);
13531 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13532 if (len > TG3_BPN_SIZE ||
13533 (len + i) > vpdlen)
13534 goto out_not_found;
13536 memcpy(tp->board_part_number, &vpd_data[i], len);
13540 if (tp->board_part_number[0])
/* Fallbacks: derive a name from the PCI device id per ASIC family. */
13544 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13545 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13546 strcpy(tp->board_part_number, "BCM5717");
13547 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13548 strcpy(tp->board_part_number, "BCM5718");
13551 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13552 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13553 strcpy(tp->board_part_number, "BCM57780");
13554 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13555 strcpy(tp->board_part_number, "BCM57760");
13556 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13557 strcpy(tp->board_part_number, "BCM57790");
13558 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13559 strcpy(tp->board_part_number, "BCM57788");
13562 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13563 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13564 strcpy(tp->board_part_number, "BCM57761");
13565 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13566 strcpy(tp->board_part_number, "BCM57765");
13567 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13568 strcpy(tp->board_part_number, "BCM57781");
13569 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13570 strcpy(tp->board_part_number, "BCM57785");
13571 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13572 strcpy(tp->board_part_number, "BCM57791");
13573 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13574 strcpy(tp->board_part_number, "BCM57795");
13577 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13578 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13579 strcpy(tp->board_part_number, "BCM57762");
13580 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13581 strcpy(tp->board_part_number, "BCM57766");
13582 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13583 strcpy(tp->board_part_number, "BCM57782");
13584 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13585 strcpy(tp->board_part_number, "BCM57786");
13588 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13589 strcpy(tp->board_part_number, "BCM95906");
13592 strcpy(tp->board_part_number, "none");
/*
 * tg3_fw_img_is_valid - sanity-check a firmware image header in NVRAM.
 * @offset: NVRAM offset of the image header.
 *
 * Reads the first word and checks the 0x0c000000 magic in the top bits,
 * then reads the second word (further check and return value are outside
 * this extraction — presumably returns nonzero when valid; confirm).
 */
13596 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13600 if (tg3_nvram_read(tp, offset, &val) ||
13601 (val & 0xfc000000) != 0x0c000000 ||
13602 tg3_nvram_read(tp, offset + 4, &val) ||
/*
 * tg3_read_bc_ver - append the bootcode version to tp->fw_ver.
 *
 * Newer images embed an ASCII version string (16 bytes copied in 4-byte
 * NVRAM reads); older images store major/minor fields at the
 * TG3_NVM_PTREV_BCVER pointer, formatted as "v%d.%02d".
 */
13609 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13611 u32 val, offset, start, ver_offset;
13613 bool newver = false;
13615 if (tg3_nvram_read(tp, 0xc, &offset) ||
13616 tg3_nvram_read(tp, 0x4, &start))
13619 offset = tg3_nvram_logical_addr(tp, offset);
13621 if (tg3_nvram_read(tp, offset, &val))
/* Magic 0x0c000000 marks the newer self-describing image format. */
13624 if ((val & 0xfc000000) == 0x0c000000) {
13625 if (tg3_nvram_read(tp, offset + 4, &val))
13632 dst_off = strlen(tp->fw_ver);
/* Need 16 bytes of room in fw_ver for the ASCII version string. */
13635 if (TG3_VER_SIZE - dst_off < 16 ||
13636 tg3_nvram_read(tp, offset + 8, &ver_offset))
13639 offset = offset + ver_offset - start;
13640 for (i = 0; i < 16; i += 4) {
13642 if (tg3_nvram_read_be32(tp, offset + i, &v))
13645 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
/* Legacy format: packed major/minor fields. */
13650 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13653 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13654 TG3_NVM_BCVER_MAJSFT;
13655 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13656 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13657 "v%d.%02d", major, minor);
/*
 * tg3_read_hwsb_ver - format the hardware selfboot version into fw_ver.
 *
 * Reads TG3_NVM_HWSB_CFG1, extracts major/minor fields, and writes
 * "sb v%d.%02d" at the start of tp->fw_ver.
 */
13661 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13663 u32 val, major, minor;
13665 /* Use native endian representation */
13666 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13669 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13670 TG3_NVM_HWSB_CFG1_MAJSFT;
13671 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13672 TG3_NVM_HWSB_CFG1_MINSFT;
13674 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
/*
 * tg3_read_sb_ver - append the selfboot (EEPROM format 1) version.
 * @val: the EEPROM signature word already read by the caller.
 *
 * Appends "sb" and, when the format/revision fields are recognized,
 * " v<major>.<minor>" plus a build suffix letter ('a' + build - 1).
 * The per-revision switch picks the offset of the version word.
 */
13677 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13679 u32 offset, major, minor, build;
13681 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13683 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13686 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13687 case TG3_EEPROM_SB_REVISION_0:
13688 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13690 case TG3_EEPROM_SB_REVISION_2:
13691 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13693 case TG3_EEPROM_SB_REVISION_3:
13694 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13696 case TG3_EEPROM_SB_REVISION_4:
13697 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13699 case TG3_EEPROM_SB_REVISION_5:
13700 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13702 case TG3_EEPROM_SB_REVISION_6:
13703 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13709 if (tg3_nvram_read(tp, offset, &val))
13712 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13713 TG3_EEPROM_SB_EDH_BLD_SHFT;
13714 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13715 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13716 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
/* Reject implausible values (build letter must stay within 'a'..'z'). */
13718 if (minor > 99 || build > 26)
13721 offset = strlen(tp->fw_ver);
13722 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13723 " v%d.%02d", major, minor);
13726 offset = strlen(tp->fw_ver);
13727 if (offset < TG3_VER_SIZE - 1)
13728 tp->fw_ver[offset] = 'a' + build - 1;
/*
 * tg3_read_mgmtfw_ver - append the ASF management firmware version.
 *
 * Scans the NVRAM directory for an ASFINI entry, validates the firmware
 * image, then copies up to 16 bytes of its embedded version string into
 * tp->fw_ver after ", " (truncating at TG3_VER_SIZE).
 */
13732 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13734 u32 val, offset, start;
/* Walk directory entries looking for the ASF init image. */
13737 for (offset = TG3_NVM_DIR_START;
13738 offset < TG3_NVM_DIR_END;
13739 offset += TG3_NVM_DIRENT_SIZE) {
13740 if (tg3_nvram_read(tp, offset, &val))
13743 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13747 if (offset == TG3_NVM_DIR_END)
/* Pre-5705 parts use a fixed load base instead of a stored one. */
13750 if (!tg3_flag(tp, 5705_PLUS))
13751 start = 0x08000000;
13752 else if (tg3_nvram_read(tp, offset - 4, &start))
13755 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13756 !tg3_fw_img_is_valid(tp, offset) ||
13757 tg3_nvram_read(tp, offset + 8, &val))
13760 offset += val - start;
13762 vlen = strlen(tp->fw_ver);
13764 tp->fw_ver[vlen++] = ',';
13765 tp->fw_ver[vlen++] = ' ';
13767 for (i = 0; i < 4; i++) {
13769 if (tg3_nvram_read_be32(tp, offset, &v))
13772 offset += sizeof(v);
/* Partial copy when fewer than 4 bytes of fw_ver remain. */
13774 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13775 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13779 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
/*
 * tg3_read_dash_ver - append the APE (DASH/NCSI) firmware version.
 *
 * Requires both APE and ASF to be enabled.  Verifies the APE segment
 * signature and READY status, reads the version word, flags NCSI-capable
 * firmware, and appends " <name> v<maj>.<min>.<rev>.<bld>" to fw_ver.
 */
13784 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13790 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13793 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13794 if (apedata != APE_SEG_SIG_MAGIC)
13797 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13798 if (!(apedata & APE_FW_STATUS_READY))
13801 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13803 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13804 tg3_flag_set(tp, APE_HAS_NCSI);
13810 vlen = strlen(tp->fw_ver);
13812 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13814 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13815 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13816 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13817 (apedata & APE_FW_VERSION_BLDMSK))/* build field needs no shift */;
/*
 * tg3_read_fw_ver - build the complete firmware version string.
 *
 * Dispatches on the NVRAM magic word to the bootcode / selfboot / hw-selfboot
 * readers, then appends management firmware info (APE DASH or ASF).  Skips
 * NVRAM-less devices ("sb" only) and respects a version already supplied by
 * VPD (tp->fw_ver[0] != 0).  Always NUL-terminates fw_ver.
 */
13820 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13823 bool vpd_vers = false;
13825 if (tp->fw_ver[0] != 0)
13828 if (tg3_flag(tp, NO_NVRAM)) {
13829 strcat(tp->fw_ver, "sb");
13833 if (tg3_nvram_read(tp, 0, &val))
13836 if (val == TG3_EEPROM_MAGIC)
13837 tg3_read_bc_ver(tp);
13838 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13839 tg3_read_sb_ver(tp, val);
13840 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13841 tg3_read_hwsb_ver(tp);
/* Management firmware: APE takes precedence over plain ASF. */
13848 if (tg3_flag(tp, ENABLE_APE)) {
13849 if (tg3_flag(tp, ENABLE_ASF))
13850 tg3_read_dash_ver(tp);
13851 } else if (tg3_flag(tp, ENABLE_ASF)) {
13852 tg3_read_mgmtfw_ver(tp);
13856 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
/*
 * tg3_rx_ret_ring_size - RX return ring entry count for this chip.
 *
 * 5717-style large-ring parts get the biggest ring; jumbo-capable
 * non-5780-class parts the 5700 size; everything else the 5705 size.
 */
13859 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13861 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13862 return TG3_RX_RET_MAX_SIZE_5717;
13863 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13864 return TG3_RX_RET_MAX_SIZE_5700;
13866 return TG3_RX_RET_MAX_SIZE_5705;
/* Host bridges known to reorder PCI writes; presence of one of these
 * triggers a driver workaround (consumer code not in this extraction). */
13869 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13870 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13871 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13872 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
/*
 * tg3_find_peer - locate the other function of a dual-port device.
 *
 * Scans the 8 PCI functions in the same slot (devfn & ~7) for a device
 * other than tp->pdev.  Per the original comments: single-port 5704
 * configurations fall back to tp->pdev itself, and the refcount from
 * pci_get_slot need not stay elevated (drop logic not in this extraction).
 */
13876 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13878 struct pci_dev *peer;
13879 unsigned int func, devnr = tp->pdev->devfn & ~7;
13881 for (func = 0; func < 8; func++) {
13882 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13883 if (peer && peer != tp->pdev)
13887 /* 5704 can be configured in single-port mode, set peer to
13888 * tp->pdev in that case.
13896 * We don't need to keep the refcount elevated; there's no way
13897 * to remove one half of this device without removing the other
13906 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13907 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13910 /* All devices that use the alternate
13911 * ASIC REV location have a CPMU.
13913 tg3_flag_set(tp, CPMU_PRESENT);
13915 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13916 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13917 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13918 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13919 reg = TG3PCI_GEN2_PRODID_ASICREV;
13920 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13921 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13922 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13923 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13924 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13925 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13926 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13927 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13928 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13929 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13930 reg = TG3PCI_GEN15_PRODID_ASICREV;
13932 reg = TG3PCI_PRODID_ASICREV;
13934 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
13937 /* Wrong chip ID in 5752 A0. This code can be removed later
13938 * as A0 is not in production.
13940 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13941 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13943 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13944 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13945 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13946 tg3_flag_set(tp, 5717_PLUS);
13948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13949 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13950 tg3_flag_set(tp, 57765_CLASS);
13952 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
13953 tg3_flag_set(tp, 57765_PLUS);
13955 /* Intentionally exclude ASIC_REV_5906 */
13956 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13957 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13958 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13959 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13960 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13961 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13962 tg3_flag(tp, 57765_PLUS))
13963 tg3_flag_set(tp, 5755_PLUS);
13965 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13966 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13967 tg3_flag_set(tp, 5780_CLASS);
13969 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13970 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13971 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13972 tg3_flag(tp, 5755_PLUS) ||
13973 tg3_flag(tp, 5780_CLASS))
13974 tg3_flag_set(tp, 5750_PLUS);
13976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13977 tg3_flag(tp, 5750_PLUS))
13978 tg3_flag_set(tp, 5705_PLUS);
13981 static int __devinit tg3_get_invariants(struct tg3 *tp)
13984 u32 pci_state_reg, grc_misc_cfg;
13989 /* Force memory write invalidate off. If we leave it on,
13990 * then on 5700_BX chips we have to enable a workaround.
13991 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13992 * to match the cacheline size. The Broadcom driver have this
13993 * workaround but turns MWI off all the times so never uses
13994 * it. This seems to suggest that the workaround is insufficient.
13996 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13997 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13998 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14000 /* Important! -- Make sure register accesses are byteswapped
14001 * correctly. Also, for those chips that require it, make
14002 * sure that indirect register accesses are enabled before
14003 * the first operation.
14005 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14007 tp->misc_host_ctrl |= (misc_ctrl_reg &
14008 MISC_HOST_CTRL_CHIPREV);
14009 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14010 tp->misc_host_ctrl);
14012 tg3_detect_asic_rev(tp, misc_ctrl_reg);
14014 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14015 * we need to disable memory and use config. cycles
14016 * only to access all registers. The 5702/03 chips
14017 * can mistakenly decode the special cycles from the
14018 * ICH chipsets as memory write cycles, causing corruption
14019 * of register and memory space. Only certain ICH bridges
14020 * will drive special cycles with non-zero data during the
14021 * address phase which can fall within the 5703's address
14022 * range. This is not an ICH bug as the PCI spec allows
14023 * non-zero address during special cycles. However, only
14024 * these ICH bridges are known to drive non-zero addresses
14025 * during special cycles.
14027 * Since special cycles do not cross PCI bridges, we only
14028 * enable this workaround if the 5703 is on the secondary
14029 * bus of these ICH bridges.
14031 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14032 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14033 static struct tg3_dev_id {
14037 } ich_chipsets[] = {
14038 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14040 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14042 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14044 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14048 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14049 struct pci_dev *bridge = NULL;
14051 while (pci_id->vendor != 0) {
14052 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14058 if (pci_id->rev != PCI_ANY_ID) {
14059 if (bridge->revision > pci_id->rev)
14062 if (bridge->subordinate &&
14063 (bridge->subordinate->number ==
14064 tp->pdev->bus->number)) {
14065 tg3_flag_set(tp, ICH_WORKAROUND);
14066 pci_dev_put(bridge);
14072 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14073 static struct tg3_dev_id {
14076 } bridge_chipsets[] = {
14077 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14078 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14081 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14082 struct pci_dev *bridge = NULL;
14084 while (pci_id->vendor != 0) {
14085 bridge = pci_get_device(pci_id->vendor,
14092 if (bridge->subordinate &&
14093 (bridge->subordinate->number <=
14094 tp->pdev->bus->number) &&
14095 (bridge->subordinate->subordinate >=
14096 tp->pdev->bus->number)) {
14097 tg3_flag_set(tp, 5701_DMA_BUG);
14098 pci_dev_put(bridge);
14104 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14105 * DMA addresses > 40-bit. This bridge may have other additional
14106 * 57xx devices behind it in some 4-port NIC designs for example.
14107 * Any tg3 device found behind the bridge will also need the 40-bit
14110 if (tg3_flag(tp, 5780_CLASS)) {
14111 tg3_flag_set(tp, 40BIT_DMA_BUG);
14112 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14114 struct pci_dev *bridge = NULL;
14117 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14118 PCI_DEVICE_ID_SERVERWORKS_EPB,
14120 if (bridge && bridge->subordinate &&
14121 (bridge->subordinate->number <=
14122 tp->pdev->bus->number) &&
14123 (bridge->subordinate->subordinate >=
14124 tp->pdev->bus->number)) {
14125 tg3_flag_set(tp, 40BIT_DMA_BUG);
14126 pci_dev_put(bridge);
14132 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14133 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14134 tp->pdev_peer = tg3_find_peer(tp);
14136 /* Determine TSO capabilities */
14137 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14138 ; /* Do nothing. HW bug. */
14139 else if (tg3_flag(tp, 57765_PLUS))
14140 tg3_flag_set(tp, HW_TSO_3);
14141 else if (tg3_flag(tp, 5755_PLUS) ||
14142 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14143 tg3_flag_set(tp, HW_TSO_2);
14144 else if (tg3_flag(tp, 5750_PLUS)) {
14145 tg3_flag_set(tp, HW_TSO_1);
14146 tg3_flag_set(tp, TSO_BUG);
14147 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14148 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14149 tg3_flag_clear(tp, TSO_BUG);
14150 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14151 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14152 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14153 tg3_flag_set(tp, TSO_BUG);
14154 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14155 tp->fw_needed = FIRMWARE_TG3TSO5;
14157 tp->fw_needed = FIRMWARE_TG3TSO;
14160 /* Selectively allow TSO based on operating conditions */
14161 if (tg3_flag(tp, HW_TSO_1) ||
14162 tg3_flag(tp, HW_TSO_2) ||
14163 tg3_flag(tp, HW_TSO_3) ||
14165 /* For firmware TSO, assume ASF is disabled.
14166 * We'll disable TSO later if we discover ASF
14167 * is enabled in tg3_get_eeprom_hw_cfg().
14169 tg3_flag_set(tp, TSO_CAPABLE);
14171 tg3_flag_clear(tp, TSO_CAPABLE);
14172 tg3_flag_clear(tp, TSO_BUG);
14173 tp->fw_needed = NULL;
14176 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14177 tp->fw_needed = FIRMWARE_TG3;
14181 if (tg3_flag(tp, 5750_PLUS)) {
14182 tg3_flag_set(tp, SUPPORT_MSI);
14183 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14184 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14185 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14186 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14187 tp->pdev_peer == tp->pdev))
14188 tg3_flag_clear(tp, SUPPORT_MSI);
14190 if (tg3_flag(tp, 5755_PLUS) ||
14191 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14192 tg3_flag_set(tp, 1SHOT_MSI);
14195 if (tg3_flag(tp, 57765_PLUS)) {
14196 tg3_flag_set(tp, SUPPORT_MSIX);
14197 tp->irq_max = TG3_IRQ_MAX_VECS;
14198 tg3_rss_init_dflt_indir_tbl(tp);
14202 if (tg3_flag(tp, 5755_PLUS))
14203 tg3_flag_set(tp, SHORT_DMA_BUG);
14205 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14206 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14208 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14209 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14210 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14211 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14213 if (tg3_flag(tp, 57765_PLUS) &&
14214 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14215 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14217 if (!tg3_flag(tp, 5705_PLUS) ||
14218 tg3_flag(tp, 5780_CLASS) ||
14219 tg3_flag(tp, USE_JUMBO_BDFLAG))
14220 tg3_flag_set(tp, JUMBO_CAPABLE);
14222 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14225 if (pci_is_pcie(tp->pdev)) {
14228 tg3_flag_set(tp, PCI_EXPRESS);
14230 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
14231 int readrq = pcie_get_readrq(tp->pdev);
14233 pcie_set_readrq(tp->pdev, 2048);
14236 pci_read_config_word(tp->pdev,
14237 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14239 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14240 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14242 tg3_flag_clear(tp, HW_TSO_2);
14243 tg3_flag_clear(tp, TSO_CAPABLE);
14245 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14246 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14247 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14248 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14249 tg3_flag_set(tp, CLKREQ_BUG);
14250 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14251 tg3_flag_set(tp, L1PLLPD_EN);
14253 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14254 /* BCM5785 devices are effectively PCIe devices, and should
14255 * follow PCIe codepaths, but do not have a PCIe capabilities
14258 tg3_flag_set(tp, PCI_EXPRESS);
14259 } else if (!tg3_flag(tp, 5705_PLUS) ||
14260 tg3_flag(tp, 5780_CLASS)) {
14261 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14262 if (!tp->pcix_cap) {
14263 dev_err(&tp->pdev->dev,
14264 "Cannot find PCI-X capability, aborting\n");
14268 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14269 tg3_flag_set(tp, PCIX_MODE);
14272 /* If we have an AMD 762 or VIA K8T800 chipset, write
14273 * reordering to the mailbox registers done by the host
14274 * controller can cause major troubles. We read back from
14275 * every mailbox register write to force the writes to be
14276 * posted to the chip in order.
14278 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14279 !tg3_flag(tp, PCI_EXPRESS))
14280 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14282 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14283 &tp->pci_cacheline_sz);
14284 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14285 &tp->pci_lat_timer);
14286 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14287 tp->pci_lat_timer < 64) {
14288 tp->pci_lat_timer = 64;
14289 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14290 tp->pci_lat_timer);
14293 /* Important! -- It is critical that the PCI-X hw workaround
14294 * situation is decided before the first MMIO register access.
14296 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14297 /* 5700 BX chips need to have their TX producer index
14298 * mailboxes written twice to workaround a bug.
14300 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14302 /* If we are in PCI-X mode, enable register write workaround.
14304 * The workaround is to use indirect register accesses
14305 * for all chip writes not to mailbox registers.
14307 if (tg3_flag(tp, PCIX_MODE)) {
14310 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14312 /* The chip can have its power management PCI config
14313 * space registers clobbered due to this bug.
14314 * So explicitly force the chip into D0 here.
14316 pci_read_config_dword(tp->pdev,
14317 tp->pm_cap + PCI_PM_CTRL,
14319 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14320 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14321 pci_write_config_dword(tp->pdev,
14322 tp->pm_cap + PCI_PM_CTRL,
14325 /* Also, force SERR#/PERR# in PCI command. */
14326 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14327 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14328 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14332 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14333 tg3_flag_set(tp, PCI_HIGH_SPEED);
14334 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14335 tg3_flag_set(tp, PCI_32BIT);
14337 /* Chip-specific fixup from Broadcom driver */
14338 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14339 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14340 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14341 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14344 /* Default fast path register access methods */
14345 tp->read32 = tg3_read32;
14346 tp->write32 = tg3_write32;
14347 tp->read32_mbox = tg3_read32;
14348 tp->write32_mbox = tg3_write32;
14349 tp->write32_tx_mbox = tg3_write32;
14350 tp->write32_rx_mbox = tg3_write32;
14352 /* Various workaround register access methods */
14353 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14354 tp->write32 = tg3_write_indirect_reg32;
14355 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14356 (tg3_flag(tp, PCI_EXPRESS) &&
14357 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14359 * Back to back register writes can cause problems on these
14360 * chips, the workaround is to read back all reg writes
14361 * except those to mailbox regs.
14363 * See tg3_write_indirect_reg32().
14365 tp->write32 = tg3_write_flush_reg32;
14368 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14369 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14370 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14371 tp->write32_rx_mbox = tg3_write_flush_reg32;
14374 if (tg3_flag(tp, ICH_WORKAROUND)) {
14375 tp->read32 = tg3_read_indirect_reg32;
14376 tp->write32 = tg3_write_indirect_reg32;
14377 tp->read32_mbox = tg3_read_indirect_mbox;
14378 tp->write32_mbox = tg3_write_indirect_mbox;
14379 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14380 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14385 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14386 pci_cmd &= ~PCI_COMMAND_MEMORY;
14387 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14389 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14390 tp->read32_mbox = tg3_read32_mbox_5906;
14391 tp->write32_mbox = tg3_write32_mbox_5906;
14392 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14393 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14396 if (tp->write32 == tg3_write_indirect_reg32 ||
14397 (tg3_flag(tp, PCIX_MODE) &&
14398 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14399 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14400 tg3_flag_set(tp, SRAM_USE_CONFIG);
14402 /* The memory arbiter has to be enabled in order for SRAM accesses
14403 * to succeed. Normally on powerup the tg3 chip firmware will make
14404 * sure it is enabled, but other entities such as system netboot
14405 * code might disable it.
14407 val = tr32(MEMARB_MODE);
14408 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14410 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14411 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14412 tg3_flag(tp, 5780_CLASS)) {
14413 if (tg3_flag(tp, PCIX_MODE)) {
14414 pci_read_config_dword(tp->pdev,
14415 tp->pcix_cap + PCI_X_STATUS,
14417 tp->pci_fn = val & 0x7;
14419 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14420 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14421 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14422 NIC_SRAM_CPMUSTAT_SIG) {
14423 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14424 tp->pci_fn = tp->pci_fn ? 1 : 0;
14426 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14427 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14428 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14429 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14430 NIC_SRAM_CPMUSTAT_SIG) {
14431 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14432 TG3_CPMU_STATUS_FSHFT_5719;
14436 /* Get eeprom hw config before calling tg3_set_power_state().
14437 * In particular, the TG3_FLAG_IS_NIC flag must be
14438 * determined before calling tg3_set_power_state() so that
14439 * we know whether or not to switch out of Vaux power.
14440 * When the flag is set, it means that GPIO1 is used for eeprom
14441 * write protect and also implies that it is a LOM where GPIOs
14442 * are not used to switch power.
14444 tg3_get_eeprom_hw_cfg(tp);
14446 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14447 tg3_flag_clear(tp, TSO_CAPABLE);
14448 tg3_flag_clear(tp, TSO_BUG);
14449 tp->fw_needed = NULL;
14452 if (tg3_flag(tp, ENABLE_APE)) {
14453 /* Allow reads and writes to the
14454 * APE register and memory space.
14456 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14457 PCISTATE_ALLOW_APE_SHMEM_WR |
14458 PCISTATE_ALLOW_APE_PSPACE_WR;
14459 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14462 tg3_ape_lock_init(tp);
14465 /* Set up tp->grc_local_ctrl before calling
14466 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14467 * will bring 5700's external PHY out of reset.
14468 * It is also used as eeprom write protect on LOMs.
14470 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14471 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14472 tg3_flag(tp, EEPROM_WRITE_PROT))
14473 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14474 GRC_LCLCTRL_GPIO_OUTPUT1);
14475 /* Unused GPIO3 must be driven as output on 5752 because there
14476 * are no pull-up resistors on unused GPIO pins.
14478 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14479 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14481 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14482 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14483 tg3_flag(tp, 57765_CLASS))
14484 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14486 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14487 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14488 /* Turn off the debug UART. */
14489 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14490 if (tg3_flag(tp, IS_NIC))
14491 /* Keep VMain power. */
14492 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14493 GRC_LCLCTRL_GPIO_OUTPUT0;
14496 /* Switch out of Vaux if it is a NIC */
14497 tg3_pwrsrc_switch_to_vmain(tp);
14499 /* Derive initial jumbo mode from MTU assigned in
14500 * ether_setup() via the alloc_etherdev() call
14502 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14503 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14505 /* Determine WakeOnLan speed to use. */
14506 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14507 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14508 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14509 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14510 tg3_flag_clear(tp, WOL_SPEED_100MB);
14512 tg3_flag_set(tp, WOL_SPEED_100MB);
14515 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14516 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14518 /* A few boards don't want Ethernet@WireSpeed phy feature */
14519 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14520 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14521 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14522 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14523 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14524 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14525 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14527 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14528 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14529 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14530 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14531 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14533 if (tg3_flag(tp, 5705_PLUS) &&
14534 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14535 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14536 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14537 !tg3_flag(tp, 57765_PLUS)) {
14538 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14539 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14540 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14541 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14542 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14543 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14544 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14545 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14546 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14548 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14551 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14552 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14553 tp->phy_otp = tg3_read_otp_phycfg(tp);
14554 if (tp->phy_otp == 0)
14555 tp->phy_otp = TG3_OTP_DEFAULT;
14558 if (tg3_flag(tp, CPMU_PRESENT))
14559 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14561 tp->mi_mode = MAC_MI_MODE_BASE;
14563 tp->coalesce_mode = 0;
14564 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14565 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14566 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14568 /* Set these bits to enable statistics workaround. */
14569 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14570 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14571 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14572 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14573 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14576 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14577 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14578 tg3_flag_set(tp, USE_PHYLIB);
14580 err = tg3_mdio_init(tp);
14584 /* Initialize data/descriptor byte/word swapping. */
14585 val = tr32(GRC_MODE);
14586 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14587 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14588 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14589 GRC_MODE_B2HRX_ENABLE |
14590 GRC_MODE_HTX2B_ENABLE |
14591 GRC_MODE_HOST_STACKUP);
14593 val &= GRC_MODE_HOST_STACKUP;
14595 tw32(GRC_MODE, val | tp->grc_mode);
14597 tg3_switch_clocks(tp);
14599 /* Clear this out for sanity. */
14600 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14602 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14604 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14605 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14606 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14608 if (chiprevid == CHIPREV_ID_5701_A0 ||
14609 chiprevid == CHIPREV_ID_5701_B0 ||
14610 chiprevid == CHIPREV_ID_5701_B2 ||
14611 chiprevid == CHIPREV_ID_5701_B5) {
14612 void __iomem *sram_base;
14614 /* Write some dummy words into the SRAM status block
14615 * area, see if it reads back correctly. If the return
14616 * value is bad, force enable the PCIX workaround.
14618 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14620 writel(0x00000000, sram_base);
14621 writel(0x00000000, sram_base + 4);
14622 writel(0xffffffff, sram_base + 4);
14623 if (readl(sram_base) != 0x00000000)
14624 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14629 tg3_nvram_init(tp);
14631 grc_misc_cfg = tr32(GRC_MISC_CFG);
14632 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14634 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14635 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14636 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14637 tg3_flag_set(tp, IS_5788);
14639 if (!tg3_flag(tp, IS_5788) &&
14640 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14641 tg3_flag_set(tp, TAGGED_STATUS);
14642 if (tg3_flag(tp, TAGGED_STATUS)) {
14643 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14644 HOSTCC_MODE_CLRTICK_TXBD);
14646 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14647 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14648 tp->misc_host_ctrl);
14651 /* Preserve the APE MAC_MODE bits */
14652 if (tg3_flag(tp, ENABLE_APE))
14653 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14657 /* these are limited to 10/100 only */
14658 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14659 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14660 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14661 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14662 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14663 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14664 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14665 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14666 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14667 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14668 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14669 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14670 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14671 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14672 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14673 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14675 err = tg3_phy_probe(tp);
14677 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14678 /* ... but do not return immediately ... */
14683 tg3_read_fw_ver(tp);
14685 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14686 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14688 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14689 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14691 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14694 /* 5700 {AX,BX} chips have a broken status block link
14695 * change bit implementation, so we must use the
14696 * status register in those cases.
14698 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14699 tg3_flag_set(tp, USE_LINKCHG_REG);
14701 tg3_flag_clear(tp, USE_LINKCHG_REG);
14703 /* The led_ctrl is set during tg3_phy_probe, here we might
14704 * have to force the link status polling mechanism based
14705 * upon subsystem IDs.
14707 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14708 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14709 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14710 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14711 tg3_flag_set(tp, USE_LINKCHG_REG);
14714 /* For all SERDES we poll the MAC status register. */
14715 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14716 tg3_flag_set(tp, POLL_SERDES);
14718 tg3_flag_clear(tp, POLL_SERDES);
14720 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14721 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14722 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14723 tg3_flag(tp, PCIX_MODE)) {
14724 tp->rx_offset = NET_SKB_PAD;
14725 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14726 tp->rx_copy_thresh = ~(u16)0;
14730 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14731 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14732 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14734 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14736 /* Increment the rx prod index on the rx std ring by at most
14737 * 8 for these chips to workaround hw errata.
14739 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14740 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14741 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14742 tp->rx_std_max_post = 8;
14744 if (tg3_flag(tp, ASPM_WORKAROUND))
14745 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14746 PCIE_PWR_MGMT_L1_THRESH_MSK;
14751 #ifdef CONFIG_SPARC
14752 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14754 struct net_device *dev = tp->dev;
14755 struct pci_dev *pdev = tp->pdev;
14756 struct device_node *dp = pci_device_to_OF_node(pdev);
14757 const unsigned char *addr;
14760 addr = of_get_property(dp, "local-mac-address", &len);
14761 if (addr && len == 6) {
14762 memcpy(dev->dev_addr, addr, 6);
14763 memcpy(dev->perm_addr, dev->dev_addr, 6);
14769 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14771 struct net_device *dev = tp->dev;
14773 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14774 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14779 static int __devinit tg3_get_device_address(struct tg3 *tp)
14781 struct net_device *dev = tp->dev;
14782 u32 hi, lo, mac_offset;
14785 #ifdef CONFIG_SPARC
14786 if (!tg3_get_macaddr_sparc(tp))
14791 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14792 tg3_flag(tp, 5780_CLASS)) {
14793 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14795 if (tg3_nvram_lock(tp))
14796 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14798 tg3_nvram_unlock(tp);
14799 } else if (tg3_flag(tp, 5717_PLUS)) {
14800 if (tp->pci_fn & 1)
14802 if (tp->pci_fn > 1)
14803 mac_offset += 0x18c;
14804 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14807 /* First try to get it from MAC address mailbox. */
14808 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14809 if ((hi >> 16) == 0x484b) {
14810 dev->dev_addr[0] = (hi >> 8) & 0xff;
14811 dev->dev_addr[1] = (hi >> 0) & 0xff;
14813 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14814 dev->dev_addr[2] = (lo >> 24) & 0xff;
14815 dev->dev_addr[3] = (lo >> 16) & 0xff;
14816 dev->dev_addr[4] = (lo >> 8) & 0xff;
14817 dev->dev_addr[5] = (lo >> 0) & 0xff;
14819 /* Some old bootcode may report a 0 MAC address in SRAM */
14820 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14823 /* Next, try NVRAM. */
14824 if (!tg3_flag(tp, NO_NVRAM) &&
14825 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14826 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14827 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14828 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14830 /* Finally just fetch it out of the MAC control regs. */
14832 hi = tr32(MAC_ADDR_0_HIGH);
14833 lo = tr32(MAC_ADDR_0_LOW);
14835 dev->dev_addr[5] = lo & 0xff;
14836 dev->dev_addr[4] = (lo >> 8) & 0xff;
14837 dev->dev_addr[3] = (lo >> 16) & 0xff;
14838 dev->dev_addr[2] = (lo >> 24) & 0xff;
14839 dev->dev_addr[1] = hi & 0xff;
14840 dev->dev_addr[0] = (hi >> 8) & 0xff;
14844 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14845 #ifdef CONFIG_SPARC
14846 if (!tg3_get_default_macaddr_sparc(tp))
14851 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14855 #define BOUNDARY_SINGLE_CACHELINE 1
14856 #define BOUNDARY_MULTI_CACHELINE 2
14858 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14860 int cacheline_size;
14864 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14866 cacheline_size = 1024;
14868 cacheline_size = (int) byte * 4;
14870 /* On 5703 and later chips, the boundary bits have no
14873 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14874 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14875 !tg3_flag(tp, PCI_EXPRESS))
14878 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14879 goal = BOUNDARY_MULTI_CACHELINE;
14881 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14882 goal = BOUNDARY_SINGLE_CACHELINE;
14888 if (tg3_flag(tp, 57765_PLUS)) {
14889 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14896 /* PCI controllers on most RISC systems tend to disconnect
14897 * when a device tries to burst across a cache-line boundary.
14898 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14900 * Unfortunately, for PCI-E there are only limited
14901 * write-side controls for this, and thus for reads
14902 * we will still get the disconnects. We'll also waste
14903 * these PCI cycles for both read and write for chips
14904 * other than 5700 and 5701 which do not implement the
14907 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14908 switch (cacheline_size) {
14913 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14914 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14915 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14917 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14918 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14923 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14924 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14928 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14929 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14932 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14933 switch (cacheline_size) {
14937 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14938 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14939 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14945 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14946 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14950 switch (cacheline_size) {
14952 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14953 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14954 DMA_RWCTRL_WRITE_BNDRY_16);
14959 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14960 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14961 DMA_RWCTRL_WRITE_BNDRY_32);
14966 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14967 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14968 DMA_RWCTRL_WRITE_BNDRY_64);
14973 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14974 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14975 DMA_RWCTRL_WRITE_BNDRY_128);
14980 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14981 DMA_RWCTRL_WRITE_BNDRY_256);
14984 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14985 DMA_RWCTRL_WRITE_BNDRY_512);
14989 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14990 DMA_RWCTRL_WRITE_BNDRY_1024);
14999 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15001 struct tg3_internal_buffer_desc test_desc;
15002 u32 sram_dma_descs;
15005 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15007 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15008 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15009 tw32(RDMAC_STATUS, 0);
15010 tw32(WDMAC_STATUS, 0);
15012 tw32(BUFMGR_MODE, 0);
15013 tw32(FTQ_RESET, 0);
15015 test_desc.addr_hi = ((u64) buf_dma) >> 32;
15016 test_desc.addr_lo = buf_dma & 0xffffffff;
15017 test_desc.nic_mbuf = 0x00002100;
15018 test_desc.len = size;
15021 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
15022 * the *second* time the tg3 driver was getting loaded after an
15025 * Broadcom tells me:
15026 * ...the DMA engine is connected to the GRC block and a DMA
15027 * reset may affect the GRC block in some unpredictable way...
15028 * The behavior of resets to individual blocks has not been tested.
15030 * Broadcom noted the GRC reset will also reset all sub-components.
15033 test_desc.cqid_sqid = (13 << 8) | 2;
15035 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15038 test_desc.cqid_sqid = (16 << 8) | 7;
15040 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15043 test_desc.flags = 0x00000005;
15045 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15048 val = *(((u32 *)&test_desc) + i);
15049 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15050 sram_dma_descs + (i * sizeof(u32)));
15051 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15053 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15056 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15058 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15061 for (i = 0; i < 40; i++) {
15065 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15067 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15068 if ((val & 0xffff) == sram_dma_descs) {
15079 #define TEST_BUFFER_SIZE 0x2000
15081 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15082 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
/* Probe-time DMA self-test: program per-chip DMA read/write control
 * settings, then write a pattern buffer to chip SRAM via DMA and read it
 * back, tightening the write boundary if corruption is detected.
 * Returns 0 on success, negative errno on failure.
 * NOTE(review): several statements/braces are elided in this view — the
 * comments below describe only what the visible code shows.
 */
15086 static int __devinit tg3_test_dma(struct tg3 *tp)
15088 dma_addr_t buf_dma;
15089 u32 *buf, saved_dma_rwctrl;
/* Coherent buffer shared with the device for the loopback pattern. */
15092 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15093 &buf_dma, GFP_KERNEL);
/* Baseline PCI read/write command codes, then per-board boundary calc. */
15099 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15100 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15102 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15104 if (tg3_flag(tp, 57765_PLUS))
15107 if (tg3_flag(tp, PCI_EXPRESS)) {
15108 /* DMA read watermark not used on PCIE */
15109 tp->dma_rwctrl |= 0x00180000;
15110 } else if (!tg3_flag(tp, PCIX_MODE)) {
15111 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15112 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15113 tp->dma_rwctrl |= 0x003f0000;
15115 tp->dma_rwctrl |= 0x003f000f;
15117 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15118 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15119 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15120 u32 read_water = 0x7;
15122 /* If the 5704 is behind the EPB bridge, we can
15123 * do the less restrictive ONE_DMA workaround for
15124 * better performance.
15126 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15127 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15128 tp->dma_rwctrl |= 0x8000;
15129 else if (ccval == 0x6 || ccval == 0x7)
15130 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15132 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15134 /* Set bit 23 to enable PCIX hw bug fix */
15136 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15137 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15139 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15140 /* 5780 always in PCIX mode */
15141 tp->dma_rwctrl |= 0x00144000;
15142 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15143 /* 5714 always in PCIX mode */
15144 tp->dma_rwctrl |= 0x00148000;
15146 tp->dma_rwctrl |= 0x001b000f;
15150 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15151 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15152 tp->dma_rwctrl &= 0xfffffff0;
15154 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15155 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15156 /* Remove this if it causes problems for some boards. */
15157 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15159 /* On 5700/5701 chips, we need to set this bit.
15160 * Otherwise the chip will issue cacheline transactions
15161 * to streamable DMA memory with not all the byte
15162 * enables turned on. This is an error on several
15163 * RISC PCI controllers, in particular sparc64.
15165 * On 5703/5704 chips, this bit has been reassigned
15166 * a different meaning. In particular, it is used
15167 * on those chips to enable a PCI-X workaround.
15169 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15172 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15175 /* Unneeded, already done by tg3_get_invariants. */
15176 tg3_switch_clocks(tp);
/* Only 5700/5701 need the full write-burst loopback test below. */
15179 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15180 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15183 /* It is best to perform DMA test with maximum write burst size
15184 * to expose the 5700/5701 write DMA bug.
15186 saved_dma_rwctrl = tp->dma_rwctrl;
15187 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15188 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Fill the test pattern (elided), then DMA it to the chip. */
15193 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15196 /* Send the buffer to the chip. */
15197 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15199 dev_err(&tp->pdev->dev,
15200 "%s: Buffer write failed. err = %d\n",
15206 /* validate data reached card RAM correctly. */
15207 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15209 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15210 if (le32_to_cpu(val) != p[i]) {
15211 dev_err(&tp->pdev->dev,
15212 "%s: Buffer corrupted on device! "
15213 "(%d != %d)\n", __func__, val, i);
15214 /* ret = -ENODEV here? */
15219 /* Now read it back. */
15220 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15222 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15223 "err = %d\n", __func__, ret);
/* Verify the read-back copy; on mismatch, retry once with the
 * 16-byte write boundary workaround before giving up. */
15228 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15232 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15233 DMA_RWCTRL_WRITE_BNDRY_16) {
15234 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15235 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15236 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15239 dev_err(&tp->pdev->dev,
15240 "%s: Buffer corrupted on read back! "
15241 "(%d != %d)\n", __func__, p[i], i);
/* Loop completed without mismatch => whole buffer verified. */
15247 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15253 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15254 DMA_RWCTRL_WRITE_BNDRY_16) {
15255 /* DMA test passed without adjusting DMA boundary,
15256 * now look for chipsets that are known to expose the
15257 * DMA bug without failing the test.
15259 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15260 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15261 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15263 /* Safe to use the calculated DMA boundary. */
15264 tp->dma_rwctrl = saved_dma_rwctrl;
15267 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15271 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
/* Select buffer-manager watermark defaults (MBUF read-DMA low water,
 * MAC RX low water, high water, plus jumbo-frame variants) based on the
 * chip generation: 57765+, 5705+ (with a 5906 override), or legacy.
 */
15276 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15278 if (tg3_flag(tp, 57765_PLUS)) {
15279 tp->bufmgr_config.mbuf_read_dma_low_water =
15280 DEFAULT_MB_RDMA_LOW_WATER_5705;
15281 tp->bufmgr_config.mbuf_mac_rx_low_water =
15282 DEFAULT_MB_MACRX_LOW_WATER_57765;
15283 tp->bufmgr_config.mbuf_high_water =
15284 DEFAULT_MB_HIGH_WATER_57765;
15286 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15287 DEFAULT_MB_RDMA_LOW_WATER_5705;
15288 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15289 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15290 tp->bufmgr_config.mbuf_high_water_jumbo =
15291 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15292 } else if (tg3_flag(tp, 5705_PLUS)) {
15293 tp->bufmgr_config.mbuf_read_dma_low_water =
15294 DEFAULT_MB_RDMA_LOW_WATER_5705;
15295 tp->bufmgr_config.mbuf_mac_rx_low_water =
15296 DEFAULT_MB_MACRX_LOW_WATER_5705;
15297 tp->bufmgr_config.mbuf_high_water =
15298 DEFAULT_MB_HIGH_WATER_5705;
/* 5906 has smaller internal memory; override RX watermarks. */
15299 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15300 tp->bufmgr_config.mbuf_mac_rx_low_water =
15301 DEFAULT_MB_MACRX_LOW_WATER_5906;
15302 tp->bufmgr_config.mbuf_high_water =
15303 DEFAULT_MB_HIGH_WATER_5906;
15306 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15307 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15308 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15309 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15310 tp->bufmgr_config.mbuf_high_water_jumbo =
15311 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
/* Legacy (pre-5705) chips use the original defaults. */
15313 tp->bufmgr_config.mbuf_read_dma_low_water =
15314 DEFAULT_MB_RDMA_LOW_WATER;
15315 tp->bufmgr_config.mbuf_mac_rx_low_water =
15316 DEFAULT_MB_MACRX_LOW_WATER;
15317 tp->bufmgr_config.mbuf_high_water =
15318 DEFAULT_MB_HIGH_WATER;
15320 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15321 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15322 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15323 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15324 tp->bufmgr_config.mbuf_high_water_jumbo =
15325 DEFAULT_MB_HIGH_WATER_JUMBO;
/* DMA descriptor watermarks are the same for all chips. */
15328 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15329 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
/* Map the masked PHY ID to a human-readable model string for the
 * probe-time log messages.  Unrecognized IDs return "unknown";
 * a zero ID indicates an external serdes.
 */
15332 static char * __devinit tg3_phy_string(struct tg3 *tp)
15334 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15335 case TG3_PHY_ID_BCM5400: return "5400";
15336 case TG3_PHY_ID_BCM5401: return "5401";
15337 case TG3_PHY_ID_BCM5411: return "5411";
15338 case TG3_PHY_ID_BCM5701: return "5701";
15339 case TG3_PHY_ID_BCM5703: return "5703";
15340 case TG3_PHY_ID_BCM5704: return "5704";
15341 case TG3_PHY_ID_BCM5705: return "5705";
15342 case TG3_PHY_ID_BCM5750: return "5750";
15343 case TG3_PHY_ID_BCM5752: return "5752";
15344 case TG3_PHY_ID_BCM5714: return "5714";
15345 case TG3_PHY_ID_BCM5780: return "5780";
15346 case TG3_PHY_ID_BCM5755: return "5755";
15347 case TG3_PHY_ID_BCM5787: return "5787";
15348 case TG3_PHY_ID_BCM5784: return "5784";
15349 case TG3_PHY_ID_BCM5756: return "5722/5756";
15350 case TG3_PHY_ID_BCM5906: return "5906";
15351 case TG3_PHY_ID_BCM5761: return "5761";
15352 case TG3_PHY_ID_BCM5718C: return "5718C";
15353 case TG3_PHY_ID_BCM5718S: return "5718S";
15354 case TG3_PHY_ID_BCM57765: return "57765";
15355 case TG3_PHY_ID_BCM5719C: return "5719C";
15356 case TG3_PHY_ID_BCM5720C: return "5720C";
15357 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15358 case 0: return "serdes";
15359 default: return "unknown";
/* Format a description of the device's bus type, speed, and width into
 * the caller-supplied buffer 'str' (e.g. "PCIX:133MHz:64-bit") and
 * return it.  'str' must be large enough for the longest combination.
 */
15363 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15365 if (tg3_flag(tp, PCI_EXPRESS)) {
15366 strcpy(str, "PCI Express");
15368 } else if (tg3_flag(tp, PCIX_MODE)) {
/* Low 5 bits of CLOCK_CTRL encode the PCI-X clock selection. */
15369 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15371 strcpy(str, "PCIX:");
15373 if ((clock_ctrl == 7) ||
15374 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15375 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15376 strcat(str, "133MHz");
15377 else if (clock_ctrl == 0)
15378 strcat(str, "33MHz");
15379 else if (clock_ctrl == 2)
15380 strcat(str, "50MHz");
15381 else if (clock_ctrl == 4)
15382 strcat(str, "66MHz");
15383 else if (clock_ctrl == 6)
15384 strcat(str, "100MHz");
/* Plain PCI: speed from the HIGH_SPEED flag. */
15386 strcpy(str, "PCI:");
15387 if (tg3_flag(tp, PCI_HIGH_SPEED))
15388 strcat(str, "66MHz");
15390 strcat(str, "33MHz");
15392 if (tg3_flag(tp, PCI_32BIT))
15393 strcat(str, ":32-bit");
15395 strcat(str, ":64-bit");
/* Initialize the default interrupt-coalescing parameters in tp->coal,
 * adjusting for chips whose coalescing mode clears tick counters on BD
 * events, and zeroing the per-IRQ/stats fields unsupported on 5705+.
 */
15399 static void __devinit tg3_init_coal(struct tg3 *tp)
15401 struct ethtool_coalesce *ec = &tp->coal;
15403 memset(ec, 0, sizeof(*ec));
15404 ec->cmd = ETHTOOL_GCOALESCE;
15405 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15406 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15407 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15408 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15409 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15410 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15411 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15412 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15413 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
/* CLRTICK modes need the alternate tick defaults. */
15415 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15416 HOSTCC_MODE_CLRTICK_TXBD)) {
15417 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15418 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15419 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15420 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
/* 5705+ hardware lacks these controls; report them as zero. */
15423 if (tg3_flag(tp, 5705_PLUS)) {
15424 ec->rx_coalesce_usecs_irq = 0;
15425 ec->tx_coalesce_usecs_irq = 0;
15426 ec->stats_block_coalesce_usecs = 0;
/* PCI probe entry point: enable and map the device, read chip
 * invariants, configure DMA masks and netdev features, run the DMA
 * self-test, set up per-vector mailboxes, and register the net_device.
 * Returns 0 on success or a negative errno, unwinding via the
 * err_out_* labels (several of which are elided in this view).
 */
15430 static int __devinit tg3_init_one(struct pci_dev *pdev,
15431 const struct pci_device_id *ent)
15433 struct net_device *dev;
15435 int i, err, pm_cap;
15436 u32 sndmbx, rcvmbx, intmbx;
15438 u64 dma_mask, persist_dma_mask;
15439 netdev_features_t features = 0;
15441 printk_once(KERN_INFO "%s\n", version);
/* --- PCI bring-up: enable, claim BARs, become bus master --- */
15443 err = pci_enable_device(pdev);
15445 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15449 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15451 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15452 goto err_out_disable_pdev;
15455 pci_set_master(pdev);
15457 /* Find power-management capability. */
15458 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15460 dev_err(&pdev->dev,
15461 "Cannot find Power Management capability, aborting\n");
15463 goto err_out_free_res;
15466 err = pci_set_power_state(pdev, PCI_D0);
15468 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15469 goto err_out_free_res;
/* --- Allocate multiqueue net_device and init private state --- */
15472 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15475 goto err_out_power_down;
15478 SET_NETDEV_DEV(dev, &pdev->dev);
15480 tp = netdev_priv(dev);
15483 tp->pm_cap = pm_cap;
15484 tp->rx_mode = TG3_DEF_RX_MODE;
15485 tp->tx_mode = TG3_DEF_TX_MODE;
15488 tp->msg_enable = tg3_debug;
15490 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15492 /* The word/byte swap controls here control register access byte
15493 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15496 tp->misc_host_ctrl =
15497 MISC_HOST_CTRL_MASK_PCI_INT |
15498 MISC_HOST_CTRL_WORD_SWAP |
15499 MISC_HOST_CTRL_INDIR_ACCESS |
15500 MISC_HOST_CTRL_PCISTATE_RW;
15502 /* The NONFRM (non-frame) byte/word swap controls take effect
15503 * on descriptor entries, anything which isn't packet data.
15505 * The StrongARM chips on the board (one for tx, one for rx)
15506 * are running in big-endian mode.
15508 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15509 GRC_MODE_WSWAP_NONFRM_DATA);
15510 #ifdef __BIG_ENDIAN
15511 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15513 spin_lock_init(&tp->lock);
15514 spin_lock_init(&tp->indirect_lock);
15515 INIT_WORK(&tp->reset_task, tg3_reset_task);
/* --- Map register BARs (BAR 0 always, BAR 2 for APE devices) --- */
15517 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15519 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15521 goto err_out_free_dev;
15524 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15525 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15526 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15527 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15528 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15529 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15530 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15531 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15532 tg3_flag_set(tp, ENABLE_APE);
15533 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15534 if (!tp->aperegs) {
15535 dev_err(&pdev->dev,
15536 "Cannot map APE registers, aborting\n");
15538 goto err_out_iounmap;
15542 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15543 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15545 dev->ethtool_ops = &tg3_ethtool_ops;
15546 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15547 dev->netdev_ops = &tg3_netdev_ops;
15548 dev->irq = pdev->irq;
15550 err = tg3_get_invariants(tp);
15552 dev_err(&pdev->dev,
15553 "Problem fetching invariants of chip, aborting\n");
15554 goto err_out_apeunmap;
15557 /* The EPB bridge inside 5714, 5715, and 5780 and any
15558 * device behind the EPB cannot support DMA addresses > 40-bit.
15559 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15560 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15561 * do DMA address check in tg3_start_xmit().
15563 if (tg3_flag(tp, IS_5788))
15564 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15565 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15566 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15567 #ifdef CONFIG_HIGHMEM
15568 dma_mask = DMA_BIT_MASK(64);
15571 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15573 /* Configure DMA attributes. */
15574 if (dma_mask > DMA_BIT_MASK(32)) {
15575 err = pci_set_dma_mask(pdev, dma_mask);
15577 features |= NETIF_F_HIGHDMA;
15578 err = pci_set_consistent_dma_mask(pdev,
15581 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15582 "DMA for consistent allocations\n");
15583 goto err_out_apeunmap;
15587 if (err || dma_mask == DMA_BIT_MASK(32)) {
15588 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15590 dev_err(&pdev->dev,
15591 "No usable DMA configuration, aborting\n");
15592 goto err_out_apeunmap;
15596 tg3_init_bufmgr_config(tp);
/* --- Netdev feature selection (VLAN, csum, TSO) --- */
15598 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15600 /* 5700 B0 chips do not support checksumming correctly due
15601 * to hardware bugs.
15603 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15604 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15606 if (tg3_flag(tp, 5755_PLUS))
15607 features |= NETIF_F_IPV6_CSUM;
15610 /* TSO is on by default on chips that support hardware TSO.
15611 * Firmware TSO on older chips gives lower performance, so it
15612 * is off by default, but can be enabled using ethtool.
15614 if ((tg3_flag(tp, HW_TSO_1) ||
15615 tg3_flag(tp, HW_TSO_2) ||
15616 tg3_flag(tp, HW_TSO_3)) &&
15617 (features & NETIF_F_IP_CSUM))
15618 features |= NETIF_F_TSO;
15619 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15620 if (features & NETIF_F_IPV6_CSUM)
15621 features |= NETIF_F_TSO6;
15622 if (tg3_flag(tp, HW_TSO_3) ||
15623 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15624 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15625 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15626 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15627 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15628 features |= NETIF_F_TSO_ECN;
15631 dev->features |= features;
15632 dev->vlan_features |= features;
15635 * Add loopback capability only for a subset of devices that support
15636 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15637 * loopback for the remaining devices.
15639 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15640 !tg3_flag(tp, CPMU_PRESENT))
15641 /* Add the loopback capability */
15642 features |= NETIF_F_LOOPBACK;
15644 dev->hw_features |= features;
15646 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15647 !tg3_flag(tp, TSO_CAPABLE) &&
15648 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15649 tg3_flag_set(tp, MAX_RXPEND_64);
15650 tp->rx_pending = 63;
15653 err = tg3_get_device_address(tp);
15655 dev_err(&pdev->dev,
15656 "Could not obtain valid ethernet address, aborting\n");
15657 goto err_out_apeunmap;
15661 * Reset chip in case UNDI or EFI driver did not shutdown
15662 * DMA self test will enable WDMAC and we'll see (spurious)
15663 * pending DMA on the PCI bus at that point.
15665 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15666 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15667 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15668 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15671 err = tg3_test_dma(tp);
15673 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15674 goto err_out_apeunmap;
/* --- Per-NAPI-vector mailbox register assignment --- */
15677 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15678 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15679 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15680 for (i = 0; i < tp->irq_max; i++) {
15681 struct tg3_napi *tnapi = &tp->napi[i];
15684 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15686 tnapi->int_mbox = intmbx;
15692 tnapi->consmbox = rcvmbx;
15693 tnapi->prodmbox = sndmbx;
15696 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15698 tnapi->coal_now = HOSTCC_MODE_NOW;
15700 if (!tg3_flag(tp, SUPPORT_MSIX))
15704 * If we support MSIX, we'll be using RSS. If we're using
15705 * RSS, the first vector only handles link interrupts and the
15706 * remaining vectors handle rx and tx interrupts. Reuse the
15707 * mailbox values for the next iteration. The values we setup
15708 * above are still useful for the single vectored mode.
15723 pci_set_drvdata(pdev, dev);
15725 if (tg3_flag(tp, 5717_PLUS)) {
15726 /* Resume a low-power mode */
15727 tg3_frob_aux_power(tp, false);
15730 tg3_timer_init(tp);
/* --- Register with the networking core and log device info --- */
15732 err = register_netdev(dev);
15734 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15735 goto err_out_apeunmap;
15738 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15739 tp->board_part_number,
15740 tp->pci_chip_rev_id,
15741 tg3_bus_string(tp, str),
15744 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15745 struct phy_device *phydev;
15746 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15748 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15749 phydev->drv->name, dev_name(&phydev->dev));
15753 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15754 ethtype = "10/100Base-TX";
15755 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15756 ethtype = "1000Base-SX";
15758 ethtype = "10/100/1000Base-T";
15760 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15761 "(WireSpeed[%d], EEE[%d])\n",
15762 tg3_phy_string(tp), ethtype,
15763 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15764 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15767 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15768 (dev->features & NETIF_F_RXCSUM) != 0,
15769 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15770 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15771 tg3_flag(tp, ENABLE_ASF) != 0,
15772 tg3_flag(tp, TSO_CAPABLE) != 0);
15773 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15775 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15776 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15778 pci_save_state(pdev);
/* --- Error unwind (labels partially elided in this view) --- */
15784 iounmap(tp->aperegs);
15785 tp->aperegs = NULL;
15797 err_out_power_down:
15798 pci_set_power_state(pdev, PCI_D3hot);
15801 pci_release_regions(pdev);
15803 err_out_disable_pdev:
15804 pci_disable_device(pdev);
15805 pci_set_drvdata(pdev, NULL);
/* PCI remove callback: release firmware, cancel the reset task,
 * unregister the netdev, unmap registers, and tear down PCI state —
 * the reverse of tg3_init_one() (some steps elided in this view).
 */
15809 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15811 struct net_device *dev = pci_get_drvdata(pdev);
15814 struct tg3 *tp = netdev_priv(dev);
15817 release_firmware(tp->fw);
15819 tg3_reset_task_cancel(tp);
15821 if (tg3_flag(tp, USE_PHYLIB)) {
15826 unregister_netdev(dev);
15828 iounmap(tp->aperegs);
15829 tp->aperegs = NULL;
15836 pci_release_regions(pdev);
15837 pci_disable_device(pdev);
15838 pci_set_drvdata(pdev, NULL);
15842 #ifdef CONFIG_PM_SLEEP
/* System-sleep suspend handler: quiesce NAPI/timer, halt the chip, and
 * prepare for low power.  If tg3_power_down_prepare() fails, the
 * hardware is restarted so the interface keeps working.
 */
15843 static int tg3_suspend(struct device *device)
15845 struct pci_dev *pdev = to_pci_dev(device);
15846 struct net_device *dev = pci_get_drvdata(pdev);
15847 struct tg3 *tp = netdev_priv(dev);
/* Nothing to do if the interface is down. */
15850 if (!netif_running(dev))
15853 tg3_reset_task_cancel(tp);
15855 tg3_netif_stop(tp);
15857 tg3_timer_stop(tp);
15859 tg3_full_lock(tp, 1);
15860 tg3_disable_ints(tp);
15861 tg3_full_unlock(tp);
15863 netif_device_detach(dev);
15865 tg3_full_lock(tp, 0);
15866 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15867 tg3_flag_clear(tp, INIT_COMPLETE);
15868 tg3_full_unlock(tp);
15870 err = tg3_power_down_prepare(tp);
/* Failure path: bring the hardware back up rather than suspend. */
15874 tg3_full_lock(tp, 0);
15876 tg3_flag_set(tp, INIT_COMPLETE);
15877 err2 = tg3_restart_hw(tp, 1);
15881 tg3_timer_start(tp);
15883 netif_device_attach(dev);
15884 tg3_netif_start(tp);
15887 tg3_full_unlock(tp);
/* System-sleep resume handler: reattach the netdev and restart the
 * hardware, timer, and NAPI if the interface was running at suspend.
 */
15896 static int tg3_resume(struct device *device)
15898 struct pci_dev *pdev = to_pci_dev(device);
15899 struct net_device *dev = pci_get_drvdata(pdev);
15900 struct tg3 *tp = netdev_priv(dev);
15903 if (!netif_running(dev))
15906 netif_device_attach(dev);
15908 tg3_full_lock(tp, 0);
15910 tg3_flag_set(tp, INIT_COMPLETE);
15911 err = tg3_restart_hw(tp, 1);
15915 tg3_timer_start(tp);
15917 tg3_netif_start(tp);
15920 tg3_full_unlock(tp);
/* Wire the suspend/resume handlers into a dev_pm_ops; when
 * CONFIG_PM_SLEEP is off, TG3_PM_OPS is NULL so the driver
 * registers no PM callbacks.
 */
15928 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15929 #define TG3_PM_OPS (&tg3_pm_ops)
15933 #define TG3_PM_OPS NULL
15935 #endif /* CONFIG_PM_SLEEP */
15938 * tg3_io_error_detected - called when PCI error is detected
15939 * @pdev: Pointer to PCI device
15940 * @state: The current pci connection state
15942 * This function is called after a PCI bus error affecting
15943 * this device has been detected.
/* Returns PCI_ERS_RESULT_NEED_RESET normally, or
 * PCI_ERS_RESULT_DISCONNECT on permanent failure. */
15945 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15946 pci_channel_state_t state)
15948 struct net_device *netdev = pci_get_drvdata(pdev);
15949 struct tg3 *tp = netdev_priv(netdev);
15950 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15952 netdev_info(netdev, "PCI I/O error detected\n");
15956 if (!netif_running(netdev))
15961 tg3_netif_stop(tp);
15963 tg3_timer_stop(tp);
15965 /* Want to make sure that the reset task doesn't run */
15966 tg3_reset_task_cancel(tp);
15968 netif_device_detach(netdev);
15970 /* Clean up software state, even if MMIO is blocked */
15971 tg3_full_lock(tp, 0);
15972 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15973 tg3_full_unlock(tp);
15976 if (state == pci_channel_io_perm_failure)
15977 err = PCI_ERS_RESULT_DISCONNECT;
15979 pci_disable_device(pdev);
15987 * tg3_io_slot_reset - called after the pci bus has been reset.
15988 * @pdev: Pointer to PCI device
15990 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
15992 * followed by fixups by BIOS, and has its config space
15993 * set up identically to what it was at cold boot.
15995 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
15997 struct net_device *netdev = pci_get_drvdata(pdev);
15998 struct tg3 *tp = netdev_priv(netdev);
15999 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16004 if (pci_enable_device(pdev)) {
16005 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
/* Restore config space saved at probe, then re-save it. */
16009 pci_set_master(pdev);
16010 pci_restore_state(pdev);
16011 pci_save_state(pdev);
/* Interface was down: nothing further to restart. */
16013 if (!netif_running(netdev)) {
16014 rc = PCI_ERS_RESULT_RECOVERED;
16018 err = tg3_power_up(tp);
16022 rc = PCI_ERS_RESULT_RECOVERED;
16031 * tg3_io_resume - called when traffic can start flowing again.
16032 * @pdev: Pointer to PCI device
16034 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
16037 static void tg3_io_resume(struct pci_dev *pdev)
16039 struct net_device *netdev = pci_get_drvdata(pdev);
16040 struct tg3 *tp = netdev_priv(netdev);
16045 if (!netif_running(netdev))
16048 tg3_full_lock(tp, 0);
16049 tg3_flag_set(tp, INIT_COMPLETE);
16050 err = tg3_restart_hw(tp, 1);
16051 tg3_full_unlock(tp);
16053 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16057 netif_device_attach(netdev);
16059 tg3_timer_start(tp);
16061 tg3_netif_start(tp);
/* AER (PCI error recovery) callbacks registered with the PCI core. */
16069 static struct pci_error_handlers tg3_err_handler = {
16070 .error_detected = tg3_io_error_detected,
16071 .slot_reset = tg3_io_slot_reset,
16072 .resume = tg3_io_resume
/* Driver registration record tying together the device ID table,
 * probe/remove, error recovery, and power-management callbacks. */
16075 static struct pci_driver tg3_driver = {
16076 .name = DRV_MODULE_NAME,
16077 .id_table = tg3_pci_tbl,
16078 .probe = tg3_init_one,
16079 .remove = __devexit_p(tg3_remove_one),
16080 .err_handler = &tg3_err_handler,
16081 .driver.pm = TG3_PM_OPS,
/* Module init: register the PCI driver with the core. */
16084 static int __init tg3_init(void)
16086 return pci_register_driver(&tg3_driver);
/* Module exit: unregister the PCI driver (triggers remove on devices). */
16089 static void __exit tg3_cleanup(void)
16091 pci_unregister_driver(&tg3_driver);
/* Hook the init/exit functions into the module loader. */
16094 module_init(tg3_init);
16095 module_exit(tg3_cleanup);