2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2012 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
48 #include <net/checksum.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
56 #include <asm/idprom.h>
65 /* Functions & macros to verify TG3_FLAGS types */
67 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
69 return test_bit(flag, bits);
72 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
77 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
79 clear_bit(flag, bits);
82 #define tg3_flag(tp, flag) \
83 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
84 #define tg3_flag_set(tp, flag) \
85 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
86 #define tg3_flag_clear(tp, flag) \
87 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
89 #define DRV_MODULE_NAME "tg3"
91 #define TG3_MIN_NUM 123
92 #define DRV_MODULE_VERSION \
93 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
94 #define DRV_MODULE_RELDATE "March 21, 2012"
96 #define RESET_KIND_SHUTDOWN 0
97 #define RESET_KIND_INIT 1
98 #define RESET_KIND_SUSPEND 2
100 #define TG3_DEF_RX_MODE 0
101 #define TG3_DEF_TX_MODE 0
102 #define TG3_DEF_MSG_ENABLE \
112 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
114 /* length of time before we decide the hardware is borked,
115 * and dev->tx_timeout() should be called to fix the problem
118 #define TG3_TX_TIMEOUT (5 * HZ)
120 /* hardware minimum and maximum for a single frame's data payload */
121 #define TG3_MIN_MTU 60
122 #define TG3_MAX_MTU(tp) \
123 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
125 /* These numbers seem to be hard coded in the NIC firmware somehow.
126 * You can't change the ring sizes, but you can change where you place
127 * them in the NIC onboard memory.
129 #define TG3_RX_STD_RING_SIZE(tp) \
130 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
131 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
132 #define TG3_DEF_RX_RING_PENDING 200
133 #define TG3_RX_JMB_RING_SIZE(tp) \
134 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
135 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
136 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
138 /* Do not place this n-ring entries value into the tp struct itself,
139 * we really want to expose these constants to GCC so that modulo et
140 * al. operations are done with shifts and masks instead of with
141 * hw multiply/modulo instructions. Another solution would be to
142 * replace things like '% foo' with '& (foo - 1)'.
145 #define TG3_TX_RING_SIZE 512
146 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
148 #define TG3_RX_STD_RING_BYTES(tp) \
149 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
150 #define TG3_RX_JMB_RING_BYTES(tp) \
151 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
152 #define TG3_RX_RCB_RING_BYTES(tp) \
153 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
154 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
156 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
158 #define TG3_DMA_BYTE_ENAB 64
160 #define TG3_RX_STD_DMA_SZ 1536
161 #define TG3_RX_JMB_DMA_SZ 9046
163 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
165 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
166 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
168 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
169 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
171 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
172 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
174 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
175 * that are at least dword aligned when used in PCIX mode. The driver
176 * works around this bug by double copying the packet. This workaround
177 * is built into the normal double copy length check for efficiency.
179 * However, the double copy is only necessary on those architectures
180 * where unaligned memory accesses are inefficient. For those architectures
181 * where unaligned memory accesses incur little penalty, we can reintegrate
182 * the 5701 in the normal rx path. Doing so saves a device structure
183 * dereference by hardcoding the double copy threshold in place.
185 #define TG3_RX_COPY_THRESHOLD 256
186 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
187 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
189 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
192 #if (NET_IP_ALIGN != 0)
193 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
195 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
198 /* minimum number of free TX descriptors required to wake up TX process */
199 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
200 #define TG3_TX_BD_DMA_MAX_2K 2048
201 #define TG3_TX_BD_DMA_MAX_4K 4096
203 #define TG3_RAW_IP_ALIGN 2
205 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
206 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
208 #define FIRMWARE_TG3 "tigon/tg3.bin"
209 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
210 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
212 static char version[] __devinitdata =
213 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
215 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
216 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_MODULE_VERSION);
219 MODULE_FIRMWARE(FIRMWARE_TG3);
220 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
223 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
224 module_param(tg3_debug, int, 0);
225 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
227 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
301 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
302 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
303 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
304 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
305 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
306 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
307 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
308 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
312 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
314 static const struct {
315 const char string[ETH_GSTRING_LEN];
316 } ethtool_stats_keys[] = {
319 { "rx_ucast_packets" },
320 { "rx_mcast_packets" },
321 { "rx_bcast_packets" },
323 { "rx_align_errors" },
324 { "rx_xon_pause_rcvd" },
325 { "rx_xoff_pause_rcvd" },
326 { "rx_mac_ctrl_rcvd" },
327 { "rx_xoff_entered" },
328 { "rx_frame_too_long_errors" },
330 { "rx_undersize_packets" },
331 { "rx_in_length_errors" },
332 { "rx_out_length_errors" },
333 { "rx_64_or_less_octet_packets" },
334 { "rx_65_to_127_octet_packets" },
335 { "rx_128_to_255_octet_packets" },
336 { "rx_256_to_511_octet_packets" },
337 { "rx_512_to_1023_octet_packets" },
338 { "rx_1024_to_1522_octet_packets" },
339 { "rx_1523_to_2047_octet_packets" },
340 { "rx_2048_to_4095_octet_packets" },
341 { "rx_4096_to_8191_octet_packets" },
342 { "rx_8192_to_9022_octet_packets" },
349 { "tx_flow_control" },
351 { "tx_single_collisions" },
352 { "tx_mult_collisions" },
354 { "tx_excessive_collisions" },
355 { "tx_late_collisions" },
356 { "tx_collide_2times" },
357 { "tx_collide_3times" },
358 { "tx_collide_4times" },
359 { "tx_collide_5times" },
360 { "tx_collide_6times" },
361 { "tx_collide_7times" },
362 { "tx_collide_8times" },
363 { "tx_collide_9times" },
364 { "tx_collide_10times" },
365 { "tx_collide_11times" },
366 { "tx_collide_12times" },
367 { "tx_collide_13times" },
368 { "tx_collide_14times" },
369 { "tx_collide_15times" },
370 { "tx_ucast_packets" },
371 { "tx_mcast_packets" },
372 { "tx_bcast_packets" },
373 { "tx_carrier_sense_errors" },
377 { "dma_writeq_full" },
378 { "dma_write_prioq_full" },
382 { "rx_threshold_hit" },
384 { "dma_readq_full" },
385 { "dma_read_prioq_full" },
386 { "tx_comp_queue_full" },
388 { "ring_set_send_prod_index" },
389 { "ring_status_update" },
391 { "nic_avoided_irqs" },
392 { "nic_tx_threshold_hit" },
394 { "mbuf_lwm_thresh_hit" },
397 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
400 static const struct {
401 const char string[ETH_GSTRING_LEN];
402 } ethtool_test_keys[] = {
403 { "nvram test (online) " },
404 { "link test (online) " },
405 { "register test (offline)" },
406 { "memory test (offline)" },
407 { "mac loopback test (offline)" },
408 { "phy loopback test (offline)" },
409 { "ext loopback test (offline)" },
410 { "interrupt test (offline)" },
413 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
416 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
418 writel(val, tp->regs + off);
421 static u32 tg3_read32(struct tg3 *tp, u32 off)
423 return readl(tp->regs + off);
426 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
428 writel(val, tp->aperegs + off);
431 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
433 return readl(tp->aperegs + off);
436 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
440 spin_lock_irqsave(&tp->indirect_lock, flags);
441 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
442 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
443 spin_unlock_irqrestore(&tp->indirect_lock, flags);
446 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
448 writel(val, tp->regs + off);
449 readl(tp->regs + off);
452 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
457 spin_lock_irqsave(&tp->indirect_lock, flags);
458 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
459 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
460 spin_unlock_irqrestore(&tp->indirect_lock, flags);
464 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
468 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
469 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
470 TG3_64BIT_REG_LOW, val);
473 if (off == TG3_RX_STD_PROD_IDX_REG) {
474 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
475 TG3_64BIT_REG_LOW, val);
479 spin_lock_irqsave(&tp->indirect_lock, flags);
480 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
481 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
482 spin_unlock_irqrestore(&tp->indirect_lock, flags);
484 /* In indirect mode when disabling interrupts, we also need
485 * to clear the interrupt bit in the GRC local ctrl register.
487 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
489 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
490 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
494 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
499 spin_lock_irqsave(&tp->indirect_lock, flags);
500 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
501 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
502 spin_unlock_irqrestore(&tp->indirect_lock, flags);
506 /* usec_wait specifies the wait time in usec when writing to certain registers
507 * where it is unsafe to read back the register without some delay.
508 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
509 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
511 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
513 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
514 /* Non-posted methods */
515 tp->write32(tp, off, val);
518 tg3_write32(tp, off, val);
523 /* Wait again after the read for the posted method to guarantee that
524 * the wait time is met.
530 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
532 tp->write32_mbox(tp, off, val);
533 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
534 tp->read32_mbox(tp, off);
537 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
539 void __iomem *mbox = tp->regs + off;
541 if (tg3_flag(tp, TXD_MBOX_HWBUG))
543 if (tg3_flag(tp, MBOX_WRITE_REORDER))
547 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
549 return readl(tp->regs + off + GRCMBOX_BASE);
552 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
554 writel(val, tp->regs + off + GRCMBOX_BASE);
557 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
558 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
559 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
560 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
561 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
563 #define tw32(reg, val) tp->write32(tp, reg, val)
564 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
565 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
566 #define tr32(reg) tp->read32(tp, reg)
568 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
572 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
573 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
576 spin_lock_irqsave(&tp->indirect_lock, flags);
577 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
578 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
579 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
581 /* Always leave this as zero. */
582 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
584 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
585 tw32_f(TG3PCI_MEM_WIN_DATA, val);
587 /* Always leave this as zero. */
588 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
590 spin_unlock_irqrestore(&tp->indirect_lock, flags);
593 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
597 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
598 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
603 spin_lock_irqsave(&tp->indirect_lock, flags);
604 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
605 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
606 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
608 /* Always leave this as zero. */
609 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
611 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
612 *val = tr32(TG3PCI_MEM_WIN_DATA);
614 /* Always leave this as zero. */
615 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
617 spin_unlock_irqrestore(&tp->indirect_lock, flags);
620 static void tg3_ape_lock_init(struct tg3 *tp)
625 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
626 regbase = TG3_APE_LOCK_GRANT;
628 regbase = TG3_APE_PER_LOCK_GRANT;
630 /* Make sure the driver hasn't any stale locks. */
631 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
633 case TG3_APE_LOCK_PHY0:
634 case TG3_APE_LOCK_PHY1:
635 case TG3_APE_LOCK_PHY2:
636 case TG3_APE_LOCK_PHY3:
637 bit = APE_LOCK_GRANT_DRIVER;
641 bit = APE_LOCK_GRANT_DRIVER;
643 bit = 1 << tp->pci_fn;
645 tg3_ape_write32(tp, regbase + 4 * i, bit);
650 static int tg3_ape_lock(struct tg3 *tp, int locknum)
654 u32 status, req, gnt, bit;
656 if (!tg3_flag(tp, ENABLE_APE))
660 case TG3_APE_LOCK_GPIO:
661 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
663 case TG3_APE_LOCK_GRC:
664 case TG3_APE_LOCK_MEM:
666 bit = APE_LOCK_REQ_DRIVER;
668 bit = 1 << tp->pci_fn;
674 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
675 req = TG3_APE_LOCK_REQ;
676 gnt = TG3_APE_LOCK_GRANT;
678 req = TG3_APE_PER_LOCK_REQ;
679 gnt = TG3_APE_PER_LOCK_GRANT;
684 tg3_ape_write32(tp, req + off, bit);
686 /* Wait for up to 1 millisecond to acquire lock. */
687 for (i = 0; i < 100; i++) {
688 status = tg3_ape_read32(tp, gnt + off);
695 /* Revoke the lock request. */
696 tg3_ape_write32(tp, gnt + off, bit);
703 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
707 if (!tg3_flag(tp, ENABLE_APE))
711 case TG3_APE_LOCK_GPIO:
712 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
714 case TG3_APE_LOCK_GRC:
715 case TG3_APE_LOCK_MEM:
717 bit = APE_LOCK_GRANT_DRIVER;
719 bit = 1 << tp->pci_fn;
725 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
726 gnt = TG3_APE_LOCK_GRANT;
728 gnt = TG3_APE_PER_LOCK_GRANT;
730 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
733 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
738 /* NCSI does not support APE events */
739 if (tg3_flag(tp, APE_HAS_NCSI))
742 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
743 if (apedata != APE_SEG_SIG_MAGIC)
746 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
747 if (!(apedata & APE_FW_STATUS_READY))
750 /* Wait for up to 1 millisecond for APE to service previous event. */
751 for (i = 0; i < 10; i++) {
752 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
755 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
757 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
758 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
759 event | APE_EVENT_STATUS_EVENT_PENDING);
761 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
763 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
769 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
770 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
773 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
778 if (!tg3_flag(tp, ENABLE_APE))
782 case RESET_KIND_INIT:
783 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
784 APE_HOST_SEG_SIG_MAGIC);
785 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
786 APE_HOST_SEG_LEN_MAGIC);
787 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
788 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
789 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
790 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
791 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
792 APE_HOST_BEHAV_NO_PHYLOCK);
793 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
794 TG3_APE_HOST_DRVR_STATE_START);
796 event = APE_EVENT_STATUS_STATE_START;
798 case RESET_KIND_SHUTDOWN:
799 /* With the interface we are currently using,
800 * APE does not track driver state. Wiping
801 * out the HOST SEGMENT SIGNATURE forces
802 * the APE to assume OS absent status.
804 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
806 if (device_may_wakeup(&tp->pdev->dev) &&
807 tg3_flag(tp, WOL_ENABLE)) {
808 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
809 TG3_APE_HOST_WOL_SPEED_AUTO);
810 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
812 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
814 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
816 event = APE_EVENT_STATUS_STATE_UNLOAD;
818 case RESET_KIND_SUSPEND:
819 event = APE_EVENT_STATUS_STATE_SUSPEND;
825 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
827 tg3_ape_send_event(tp, event);
830 static void tg3_disable_ints(struct tg3 *tp)
834 tw32(TG3PCI_MISC_HOST_CTRL,
835 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
836 for (i = 0; i < tp->irq_max; i++)
837 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
840 static void tg3_enable_ints(struct tg3 *tp)
847 tw32(TG3PCI_MISC_HOST_CTRL,
848 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
850 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
851 for (i = 0; i < tp->irq_cnt; i++) {
852 struct tg3_napi *tnapi = &tp->napi[i];
854 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
855 if (tg3_flag(tp, 1SHOT_MSI))
856 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
858 tp->coal_now |= tnapi->coal_now;
861 /* Force an initial interrupt */
862 if (!tg3_flag(tp, TAGGED_STATUS) &&
863 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
864 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
866 tw32(HOSTCC_MODE, tp->coal_now);
868 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
871 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
873 struct tg3 *tp = tnapi->tp;
874 struct tg3_hw_status *sblk = tnapi->hw_status;
875 unsigned int work_exists = 0;
877 /* check for phy events */
878 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
879 if (sblk->status & SD_STATUS_LINK_CHG)
883 /* check for TX work to do */
884 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
887 /* check for RX work to do */
888 if (tnapi->rx_rcb_prod_idx &&
889 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
896 * similar to tg3_enable_ints, but it accurately determines whether there
897 * is new work pending and can return without flushing the PIO write
898 * which reenables interrupts
900 static void tg3_int_reenable(struct tg3_napi *tnapi)
902 struct tg3 *tp = tnapi->tp;
904 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
907 /* When doing tagged status, this work check is unnecessary.
908 * The last_tag we write above tells the chip which piece of
909 * work we've completed.
911 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
912 tw32(HOSTCC_MODE, tp->coalesce_mode |
913 HOSTCC_MODE_ENABLE | tnapi->coal_now);
916 static void tg3_switch_clocks(struct tg3 *tp)
921 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
924 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
926 orig_clock_ctrl = clock_ctrl;
927 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
928 CLOCK_CTRL_CLKRUN_OENABLE |
930 tp->pci_clock_ctrl = clock_ctrl;
932 if (tg3_flag(tp, 5705_PLUS)) {
933 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
934 tw32_wait_f(TG3PCI_CLOCK_CTRL,
935 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
937 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
938 tw32_wait_f(TG3PCI_CLOCK_CTRL,
940 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
942 tw32_wait_f(TG3PCI_CLOCK_CTRL,
943 clock_ctrl | (CLOCK_CTRL_ALTCLK),
946 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
949 #define PHY_BUSY_LOOPS 5000
951 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
957 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
959 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
965 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
966 MI_COM_PHY_ADDR_MASK);
967 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
968 MI_COM_REG_ADDR_MASK);
969 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
971 tw32_f(MAC_MI_COM, frame_val);
973 loops = PHY_BUSY_LOOPS;
976 frame_val = tr32(MAC_MI_COM);
978 if ((frame_val & MI_COM_BUSY) == 0) {
980 frame_val = tr32(MAC_MI_COM);
988 *val = frame_val & MI_COM_DATA_MASK;
992 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
993 tw32_f(MAC_MI_MODE, tp->mi_mode);
1000 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1006 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1007 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1010 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1012 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1016 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1017 MI_COM_PHY_ADDR_MASK);
1018 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1019 MI_COM_REG_ADDR_MASK);
1020 frame_val |= (val & MI_COM_DATA_MASK);
1021 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1023 tw32_f(MAC_MI_COM, frame_val);
1025 loops = PHY_BUSY_LOOPS;
1026 while (loops != 0) {
1028 frame_val = tr32(MAC_MI_COM);
1029 if ((frame_val & MI_COM_BUSY) == 0) {
1031 frame_val = tr32(MAC_MI_COM);
1041 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1042 tw32_f(MAC_MI_MODE, tp->mi_mode);
1049 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1053 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1057 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1061 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1062 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1066 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1072 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1076 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1080 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1084 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1085 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1089 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1095 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1099 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1101 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1106 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1110 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1112 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1117 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1121 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1122 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1123 MII_TG3_AUXCTL_SHDWSEL_MISC);
1125 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1130 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1132 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1133 set |= MII_TG3_AUXCTL_MISC_WREN;
1135 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1138 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1139 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1140 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1141 MII_TG3_AUXCTL_ACTL_TX_6DB)
1143 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1144 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1145 MII_TG3_AUXCTL_ACTL_TX_6DB);
1147 static int tg3_bmcr_reset(struct tg3 *tp)
1152 /* OK, reset it, and poll the BMCR_RESET bit until it
1153 * clears or we time out.
1155 phy_control = BMCR_RESET;
1156 err = tg3_writephy(tp, MII_BMCR, phy_control);
1162 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1166 if ((phy_control & BMCR_RESET) == 0) {
1178 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1180 struct tg3 *tp = bp->priv;
1183 spin_lock_bh(&tp->lock);
1185 if (tg3_readphy(tp, reg, &val))
1188 spin_unlock_bh(&tp->lock);
1193 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1195 struct tg3 *tp = bp->priv;
1198 spin_lock_bh(&tp->lock);
1200 if (tg3_writephy(tp, reg, val))
1203 spin_unlock_bh(&tp->lock);
1208 static int tg3_mdio_reset(struct mii_bus *bp)
1213 static void tg3_mdio_config_5785(struct tg3 *tp)
1216 struct phy_device *phydev;
1218 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1219 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1220 case PHY_ID_BCM50610:
1221 case PHY_ID_BCM50610M:
1222 val = MAC_PHYCFG2_50610_LED_MODES;
1224 case PHY_ID_BCMAC131:
1225 val = MAC_PHYCFG2_AC131_LED_MODES;
1227 case PHY_ID_RTL8211C:
1228 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1230 case PHY_ID_RTL8201E:
1231 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1237 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1238 tw32(MAC_PHYCFG2, val);
1240 val = tr32(MAC_PHYCFG1);
1241 val &= ~(MAC_PHYCFG1_RGMII_INT |
1242 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1243 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1244 tw32(MAC_PHYCFG1, val);
1249 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1250 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1251 MAC_PHYCFG2_FMODE_MASK_MASK |
1252 MAC_PHYCFG2_GMODE_MASK_MASK |
1253 MAC_PHYCFG2_ACT_MASK_MASK |
1254 MAC_PHYCFG2_QUAL_MASK_MASK |
1255 MAC_PHYCFG2_INBAND_ENABLE;
1257 tw32(MAC_PHYCFG2, val);
1259 val = tr32(MAC_PHYCFG1);
1260 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1261 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1262 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1263 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1264 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1265 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1266 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1268 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1269 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1270 tw32(MAC_PHYCFG1, val);
1272 val = tr32(MAC_EXT_RGMII_MODE);
1273 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1274 MAC_RGMII_MODE_RX_QUALITY |
1275 MAC_RGMII_MODE_RX_ACTIVITY |
1276 MAC_RGMII_MODE_RX_ENG_DET |
1277 MAC_RGMII_MODE_TX_ENABLE |
1278 MAC_RGMII_MODE_TX_LOWPWR |
1279 MAC_RGMII_MODE_TX_RESET);
1280 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1281 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1282 val |= MAC_RGMII_MODE_RX_INT_B |
1283 MAC_RGMII_MODE_RX_QUALITY |
1284 MAC_RGMII_MODE_RX_ACTIVITY |
1285 MAC_RGMII_MODE_RX_ENG_DET;
1286 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1287 val |= MAC_RGMII_MODE_TX_ENABLE |
1288 MAC_RGMII_MODE_TX_LOWPWR |
1289 MAC_RGMII_MODE_TX_RESET;
1291 tw32(MAC_EXT_RGMII_MODE, val);
1294 static void tg3_mdio_start(struct tg3 *tp)
1296 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1297 tw32_f(MAC_MI_MODE, tp->mi_mode);
1300 if (tg3_flag(tp, MDIOBUS_INITED) &&
1301 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1302 tg3_mdio_config_5785(tp);
1305 static int tg3_mdio_init(struct tg3 *tp)
1309 struct phy_device *phydev;
1311 if (tg3_flag(tp, 5717_PLUS)) {
1314 tp->phy_addr = tp->pci_fn + 1;
1316 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1317 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1319 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1320 TG3_CPMU_PHY_STRAP_IS_SERDES;
1324 tp->phy_addr = TG3_PHY_MII_ADDR;
1328 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1331 tp->mdio_bus = mdiobus_alloc();
1332 if (tp->mdio_bus == NULL)
1335 tp->mdio_bus->name = "tg3 mdio bus";
1336 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1337 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1338 tp->mdio_bus->priv = tp;
1339 tp->mdio_bus->parent = &tp->pdev->dev;
1340 tp->mdio_bus->read = &tg3_mdio_read;
1341 tp->mdio_bus->write = &tg3_mdio_write;
1342 tp->mdio_bus->reset = &tg3_mdio_reset;
1343 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1344 tp->mdio_bus->irq = &tp->mdio_irq[0];
1346 for (i = 0; i < PHY_MAX_ADDR; i++)
1347 tp->mdio_bus->irq[i] = PHY_POLL;
1349 /* The bus registration will look for all the PHYs on the mdio bus.
1350 * Unfortunately, it does not ensure the PHY is powered up before
1351 * accessing the PHY ID registers. A chip reset is the
1352 * quickest way to bring the device back to an operational state..
1354 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1357 i = mdiobus_register(tp->mdio_bus);
1359 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1360 mdiobus_free(tp->mdio_bus);
1364 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1366 if (!phydev || !phydev->drv) {
1367 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1368 mdiobus_unregister(tp->mdio_bus);
1369 mdiobus_free(tp->mdio_bus);
1373 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1374 case PHY_ID_BCM57780:
1375 phydev->interface = PHY_INTERFACE_MODE_GMII;
1376 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1378 case PHY_ID_BCM50610:
1379 case PHY_ID_BCM50610M:
1380 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1381 PHY_BRCM_RX_REFCLK_UNUSED |
1382 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1383 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1384 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1385 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1386 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1387 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1388 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1389 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1391 case PHY_ID_RTL8211C:
1392 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1394 case PHY_ID_RTL8201E:
1395 case PHY_ID_BCMAC131:
1396 phydev->interface = PHY_INTERFACE_MODE_MII;
1397 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1398 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1402 tg3_flag_set(tp, MDIOBUS_INITED);
1404 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1405 tg3_mdio_config_5785(tp);
1410 static void tg3_mdio_fini(struct tg3 *tp)
1412 if (tg3_flag(tp, MDIOBUS_INITED)) {
1413 tg3_flag_clear(tp, MDIOBUS_INITED);
1414 mdiobus_unregister(tp->mdio_bus);
1415 mdiobus_free(tp->mdio_bus);
1419 /* tp->lock is held. */
1420 static inline void tg3_generate_fw_event(struct tg3 *tp)
1424 val = tr32(GRC_RX_CPU_EVENT);
1425 val |= GRC_RX_CPU_DRIVER_EVENT;
1426 tw32_f(GRC_RX_CPU_EVENT, val);
1428 tp->last_event_jiffies = jiffies;
1431 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1433 /* tp->lock is held. */
1434 static void tg3_wait_for_event_ack(struct tg3 *tp)
1437 unsigned int delay_cnt;
1440 /* If enough time has passed, no wait is necessary. */
1441 time_remain = (long)(tp->last_event_jiffies + 1 +
1442 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1444 if (time_remain < 0)
1447 /* Check if we can shorten the wait time. */
1448 delay_cnt = jiffies_to_usecs(time_remain);
1449 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1450 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1451 delay_cnt = (delay_cnt >> 3) + 1;
1453 for (i = 0; i < delay_cnt; i++) {
1454 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1460 /* tp->lock is held. */
1461 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1466 if (!tg3_readphy(tp, MII_BMCR, ®))
1468 if (!tg3_readphy(tp, MII_BMSR, ®))
1469 val |= (reg & 0xffff);
1473 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1475 if (!tg3_readphy(tp, MII_LPA, ®))
1476 val |= (reg & 0xffff);
1480 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1481 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1483 if (!tg3_readphy(tp, MII_STAT1000, ®))
1484 val |= (reg & 0xffff);
1488 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1495 /* tp->lock is held. */
1496 static void tg3_ump_link_report(struct tg3 *tp)
1500 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1503 tg3_phy_gather_ump_data(tp, data);
1505 tg3_wait_for_event_ack(tp);
1507 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1508 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1509 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1510 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1511 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1512 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1514 tg3_generate_fw_event(tp);
1517 /* tp->lock is held. */
1518 static void tg3_stop_fw(struct tg3 *tp)
1520 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1521 /* Wait for RX cpu to ACK the previous event. */
1522 tg3_wait_for_event_ack(tp);
1524 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1526 tg3_generate_fw_event(tp);
1528 /* Wait for RX cpu to ACK this event. */
1529 tg3_wait_for_event_ack(tp);
1533 /* tp->lock is held. */
1534 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1536 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1537 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1539 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1541 case RESET_KIND_INIT:
1542 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1546 case RESET_KIND_SHUTDOWN:
1547 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1551 case RESET_KIND_SUSPEND:
1552 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1561 if (kind == RESET_KIND_INIT ||
1562 kind == RESET_KIND_SUSPEND)
1563 tg3_ape_driver_state_change(tp, kind);
1566 /* tp->lock is held. */
1567 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1569 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1571 case RESET_KIND_INIT:
1572 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1573 DRV_STATE_START_DONE);
1576 case RESET_KIND_SHUTDOWN:
1577 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1578 DRV_STATE_UNLOAD_DONE);
1586 if (kind == RESET_KIND_SHUTDOWN)
1587 tg3_ape_driver_state_change(tp, kind);
1590 /* tp->lock is held. */
1591 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1593 if (tg3_flag(tp, ENABLE_ASF)) {
1595 case RESET_KIND_INIT:
1596 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1600 case RESET_KIND_SHUTDOWN:
1601 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1605 case RESET_KIND_SUSPEND:
1606 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1616 static int tg3_poll_fw(struct tg3 *tp)
1621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1622 /* Wait up to 20ms for init done. */
1623 for (i = 0; i < 200; i++) {
1624 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1631 /* Wait for firmware initialization to complete. */
1632 for (i = 0; i < 100000; i++) {
1633 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1634 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1639 /* Chip might not be fitted with firmware. Some Sun onboard
1640 * parts are configured like that. So don't signal the timeout
1641 * of the above loop as an error, but do report the lack of
1642 * running firmware once.
1644 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1645 tg3_flag_set(tp, NO_FWARE_REPORTED);
1647 netdev_info(tp->dev, "No firmware running\n");
1650 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1651 /* The 57765 A0 needs a little more
1652 * time to do some important work.
1660 static void tg3_link_report(struct tg3 *tp)
1662 if (!netif_carrier_ok(tp->dev)) {
1663 netif_info(tp, link, tp->dev, "Link is down\n");
1664 tg3_ump_link_report(tp);
1665 } else if (netif_msg_link(tp)) {
1666 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1667 (tp->link_config.active_speed == SPEED_1000 ?
1669 (tp->link_config.active_speed == SPEED_100 ?
1671 (tp->link_config.active_duplex == DUPLEX_FULL ?
1674 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1675 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1677 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1680 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1681 netdev_info(tp->dev, "EEE is %s\n",
1682 tp->setlpicnt ? "enabled" : "disabled");
1684 tg3_ump_link_report(tp);
1688 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1692 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1693 miireg = ADVERTISE_1000XPAUSE;
1694 else if (flow_ctrl & FLOW_CTRL_TX)
1695 miireg = ADVERTISE_1000XPSE_ASYM;
1696 else if (flow_ctrl & FLOW_CTRL_RX)
1697 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1704 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1708 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1709 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1710 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1711 if (lcladv & ADVERTISE_1000XPAUSE)
1713 if (rmtadv & ADVERTISE_1000XPAUSE)
1720 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1724 u32 old_rx_mode = tp->rx_mode;
1725 u32 old_tx_mode = tp->tx_mode;
1727 if (tg3_flag(tp, USE_PHYLIB))
1728 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1730 autoneg = tp->link_config.autoneg;
1732 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1733 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1734 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1736 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1738 flowctrl = tp->link_config.flowctrl;
1740 tp->link_config.active_flowctrl = flowctrl;
1742 if (flowctrl & FLOW_CTRL_RX)
1743 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1745 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1747 if (old_rx_mode != tp->rx_mode)
1748 tw32_f(MAC_RX_MODE, tp->rx_mode);
1750 if (flowctrl & FLOW_CTRL_TX)
1751 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1753 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1755 if (old_tx_mode != tp->tx_mode)
1756 tw32_f(MAC_TX_MODE, tp->tx_mode);
1759 static void tg3_adjust_link(struct net_device *dev)
1761 u8 oldflowctrl, linkmesg = 0;
1762 u32 mac_mode, lcl_adv, rmt_adv;
1763 struct tg3 *tp = netdev_priv(dev);
1764 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1766 spin_lock_bh(&tp->lock);
1768 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1769 MAC_MODE_HALF_DUPLEX);
1771 oldflowctrl = tp->link_config.active_flowctrl;
1777 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1778 mac_mode |= MAC_MODE_PORT_MODE_MII;
1779 else if (phydev->speed == SPEED_1000 ||
1780 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1781 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1783 mac_mode |= MAC_MODE_PORT_MODE_MII;
1785 if (phydev->duplex == DUPLEX_HALF)
1786 mac_mode |= MAC_MODE_HALF_DUPLEX;
1788 lcl_adv = mii_advertise_flowctrl(
1789 tp->link_config.flowctrl);
1792 rmt_adv = LPA_PAUSE_CAP;
1793 if (phydev->asym_pause)
1794 rmt_adv |= LPA_PAUSE_ASYM;
1797 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1799 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1801 if (mac_mode != tp->mac_mode) {
1802 tp->mac_mode = mac_mode;
1803 tw32_f(MAC_MODE, tp->mac_mode);
1807 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1808 if (phydev->speed == SPEED_10)
1810 MAC_MI_STAT_10MBPS_MODE |
1811 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1813 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1816 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1817 tw32(MAC_TX_LENGTHS,
1818 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1819 (6 << TX_LENGTHS_IPG_SHIFT) |
1820 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1822 tw32(MAC_TX_LENGTHS,
1823 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1824 (6 << TX_LENGTHS_IPG_SHIFT) |
1825 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1827 if (phydev->link != tp->old_link ||
1828 phydev->speed != tp->link_config.active_speed ||
1829 phydev->duplex != tp->link_config.active_duplex ||
1830 oldflowctrl != tp->link_config.active_flowctrl)
1833 tp->old_link = phydev->link;
1834 tp->link_config.active_speed = phydev->speed;
1835 tp->link_config.active_duplex = phydev->duplex;
1837 spin_unlock_bh(&tp->lock);
1840 tg3_link_report(tp);
1843 static int tg3_phy_init(struct tg3 *tp)
1845 struct phy_device *phydev;
1847 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1850 /* Bring the PHY back to a known state. */
1853 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1855 /* Attach the MAC to the PHY. */
1856 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1857 phydev->dev_flags, phydev->interface);
1858 if (IS_ERR(phydev)) {
1859 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1860 return PTR_ERR(phydev);
1863 /* Mask with MAC supported features. */
1864 switch (phydev->interface) {
1865 case PHY_INTERFACE_MODE_GMII:
1866 case PHY_INTERFACE_MODE_RGMII:
1867 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1868 phydev->supported &= (PHY_GBIT_FEATURES |
1870 SUPPORTED_Asym_Pause);
1874 case PHY_INTERFACE_MODE_MII:
1875 phydev->supported &= (PHY_BASIC_FEATURES |
1877 SUPPORTED_Asym_Pause);
1880 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1884 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1886 phydev->advertising = phydev->supported;
1891 static void tg3_phy_start(struct tg3 *tp)
1893 struct phy_device *phydev;
1895 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1898 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1900 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1901 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1902 phydev->speed = tp->link_config.speed;
1903 phydev->duplex = tp->link_config.duplex;
1904 phydev->autoneg = tp->link_config.autoneg;
1905 phydev->advertising = tp->link_config.advertising;
1910 phy_start_aneg(phydev);
1913 static void tg3_phy_stop(struct tg3 *tp)
1915 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1918 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1921 static void tg3_phy_fini(struct tg3 *tp)
1923 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1924 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1925 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1929 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1934 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1937 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1938 /* Cannot do read-modify-write on 5401 */
1939 err = tg3_phy_auxctl_write(tp,
1940 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1941 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1946 err = tg3_phy_auxctl_read(tp,
1947 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1951 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1952 err = tg3_phy_auxctl_write(tp,
1953 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1959 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1963 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1966 tg3_writephy(tp, MII_TG3_FET_TEST,
1967 phytest | MII_TG3_FET_SHADOW_EN);
1968 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1970 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1972 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1973 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1975 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1979 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1983 if (!tg3_flag(tp, 5705_PLUS) ||
1984 (tg3_flag(tp, 5717_PLUS) &&
1985 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1988 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1989 tg3_phy_fet_toggle_apd(tp, enable);
1993 reg = MII_TG3_MISC_SHDW_WREN |
1994 MII_TG3_MISC_SHDW_SCR5_SEL |
1995 MII_TG3_MISC_SHDW_SCR5_LPED |
1996 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1997 MII_TG3_MISC_SHDW_SCR5_SDTL |
1998 MII_TG3_MISC_SHDW_SCR5_C125OE;
1999 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2000 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2002 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2005 reg = MII_TG3_MISC_SHDW_WREN |
2006 MII_TG3_MISC_SHDW_APD_SEL |
2007 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2009 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2011 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2014 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2018 if (!tg3_flag(tp, 5705_PLUS) ||
2019 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2022 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2025 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2026 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2028 tg3_writephy(tp, MII_TG3_FET_TEST,
2029 ephy | MII_TG3_FET_SHADOW_EN);
2030 if (!tg3_readphy(tp, reg, &phy)) {
2032 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2034 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2035 tg3_writephy(tp, reg, phy);
2037 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2042 ret = tg3_phy_auxctl_read(tp,
2043 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2046 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2048 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2049 tg3_phy_auxctl_write(tp,
2050 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2055 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2060 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2063 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2065 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2066 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2069 static void tg3_phy_apply_otp(struct tg3 *tp)
2078 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2081 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2082 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2083 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2085 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2086 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2087 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2089 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2090 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2091 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2093 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2094 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2096 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2097 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2099 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2100 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2101 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2103 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2106 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2110 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2115 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2116 current_link_up == 1 &&
2117 tp->link_config.active_duplex == DUPLEX_FULL &&
2118 (tp->link_config.active_speed == SPEED_100 ||
2119 tp->link_config.active_speed == SPEED_1000)) {
2122 if (tp->link_config.active_speed == SPEED_1000)
2123 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2125 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2127 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2129 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2130 TG3_CL45_D7_EEERES_STAT, &val);
2132 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2133 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2137 if (!tp->setlpicnt) {
2138 if (current_link_up == 1 &&
2139 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2140 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2141 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2144 val = tr32(TG3_CPMU_EEE_MODE);
2145 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2149 static void tg3_phy_eee_enable(struct tg3 *tp)
2153 if (tp->link_config.active_speed == SPEED_1000 &&
2154 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2155 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2156 tg3_flag(tp, 57765_CLASS)) &&
2157 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2158 val = MII_TG3_DSP_TAP26_ALNOKO |
2159 MII_TG3_DSP_TAP26_RMRXSTO;
2160 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2161 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2164 val = tr32(TG3_CPMU_EEE_MODE);
2165 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2168 static int tg3_wait_macro_done(struct tg3 *tp)
2175 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2176 if ((tmp32 & 0x1000) == 0)
2186 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2188 static const u32 test_pat[4][6] = {
2189 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2190 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2191 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2192 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2196 for (chan = 0; chan < 4; chan++) {
2199 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2200 (chan * 0x2000) | 0x0200);
2201 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2203 for (i = 0; i < 6; i++)
2204 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2207 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2208 if (tg3_wait_macro_done(tp)) {
2213 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2214 (chan * 0x2000) | 0x0200);
2215 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2216 if (tg3_wait_macro_done(tp)) {
2221 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2222 if (tg3_wait_macro_done(tp)) {
2227 for (i = 0; i < 6; i += 2) {
2230 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2231 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2232 tg3_wait_macro_done(tp)) {
2238 if (low != test_pat[chan][i] ||
2239 high != test_pat[chan][i+1]) {
2240 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2241 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2242 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2252 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2256 for (chan = 0; chan < 4; chan++) {
2259 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2260 (chan * 0x2000) | 0x0200);
2261 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2262 for (i = 0; i < 6; i++)
2263 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2264 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2265 if (tg3_wait_macro_done(tp))
2272 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2274 u32 reg32, phy9_orig;
2275 int retries, do_phy_reset, err;
2281 err = tg3_bmcr_reset(tp);
2287 /* Disable transmitter and interrupt. */
2288 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2292 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2294 /* Set full-duplex, 1000 mbps. */
2295 tg3_writephy(tp, MII_BMCR,
2296 BMCR_FULLDPLX | BMCR_SPEED1000);
2298 /* Set to master mode. */
2299 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2302 tg3_writephy(tp, MII_CTRL1000,
2303 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2305 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2309 /* Block the PHY control access. */
2310 tg3_phydsp_write(tp, 0x8005, 0x0800);
2312 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2315 } while (--retries);
2317 err = tg3_phy_reset_chanpat(tp);
2321 tg3_phydsp_write(tp, 0x8005, 0x0000);
2323 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2324 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2326 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2328 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2330 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2332 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2339 /* This will reset the tigon3 PHY if there is no valid
2340 * link unless the FORCE argument is non-zero.
2342 static int tg3_phy_reset(struct tg3 *tp)
2347 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2348 val = tr32(GRC_MISC_CFG);
2349 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2352 err = tg3_readphy(tp, MII_BMSR, &val);
2353 err |= tg3_readphy(tp, MII_BMSR, &val);
2357 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2358 netif_carrier_off(tp->dev);
2359 tg3_link_report(tp);
2362 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2363 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2364 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2365 err = tg3_phy_reset_5703_4_5(tp);
2372 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2373 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2374 cpmuctrl = tr32(TG3_CPMU_CTRL);
2375 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2377 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2380 err = tg3_bmcr_reset(tp);
2384 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2385 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2386 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2388 tw32(TG3_CPMU_CTRL, cpmuctrl);
2391 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2392 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2393 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2394 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2395 CPMU_LSPD_1000MB_MACCLK_12_5) {
2396 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2398 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2402 if (tg3_flag(tp, 5717_PLUS) &&
2403 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2406 tg3_phy_apply_otp(tp);
2408 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2409 tg3_phy_toggle_apd(tp, true);
2411 tg3_phy_toggle_apd(tp, false);
2414 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2415 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2416 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2417 tg3_phydsp_write(tp, 0x000a, 0x0323);
2418 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2421 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2422 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2423 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2426 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2427 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2428 tg3_phydsp_write(tp, 0x000a, 0x310b);
2429 tg3_phydsp_write(tp, 0x201f, 0x9506);
2430 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2431 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2433 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2434 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2435 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2436 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2437 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2438 tg3_writephy(tp, MII_TG3_TEST1,
2439 MII_TG3_TEST1_TRIM_EN | 0x4);
2441 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2443 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2447 /* Set Extended packet length bit (bit 14) on all chips that */
2448 /* support jumbo frames */
2449 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2450 /* Cannot do read-modify-write on 5401 */
2451 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2452 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2453 /* Set bit 14 with read-modify-write to preserve other bits */
2454 err = tg3_phy_auxctl_read(tp,
2455 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2457 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2458 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2461 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2462 * jumbo frames transmission.
2464 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2465 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2466 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2467 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2470 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2471 /* adjust output voltage */
2472 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2475 tg3_phy_toggle_automdix(tp, 1);
2476 tg3_phy_set_wirespeed(tp);
2480 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2481 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2482 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2483 TG3_GPIO_MSG_NEED_VAUX)
2484 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2485 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2486 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2487 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2488 (TG3_GPIO_MSG_DRVR_PRES << 12))
2490 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2491 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2492 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2493 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2494 (TG3_GPIO_MSG_NEED_VAUX << 12))
2496 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2500 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2501 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2502 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2504 status = tr32(TG3_CPMU_DRV_STATUS);
2506 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2507 status &= ~(TG3_GPIO_MSG_MASK << shift);
2508 status |= (newstat << shift);
2510 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2511 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2512 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2514 tw32(TG3_CPMU_DRV_STATUS, status);
2516 return status >> TG3_APE_GPIO_MSG_SHIFT;
2519 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2521 if (!tg3_flag(tp, IS_NIC))
2524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2525 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2526 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2527 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2530 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2532 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2533 TG3_GRC_LCLCTL_PWRSW_DELAY);
2535 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2537 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2538 TG3_GRC_LCLCTL_PWRSW_DELAY);
2544 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2548 if (!tg3_flag(tp, IS_NIC) ||
2549 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2550 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2553 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2555 tw32_wait_f(GRC_LOCAL_CTRL,
2556 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2557 TG3_GRC_LCLCTL_PWRSW_DELAY);
2559 tw32_wait_f(GRC_LOCAL_CTRL,
2561 TG3_GRC_LCLCTL_PWRSW_DELAY);
2563 tw32_wait_f(GRC_LOCAL_CTRL,
2564 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2565 TG3_GRC_LCLCTL_PWRSW_DELAY);
2568 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2570 if (!tg3_flag(tp, IS_NIC))
2573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2574 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2575 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2576 (GRC_LCLCTRL_GPIO_OE0 |
2577 GRC_LCLCTRL_GPIO_OE1 |
2578 GRC_LCLCTRL_GPIO_OE2 |
2579 GRC_LCLCTRL_GPIO_OUTPUT0 |
2580 GRC_LCLCTRL_GPIO_OUTPUT1),
2581 TG3_GRC_LCLCTL_PWRSW_DELAY);
2582 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2583 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2584 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2585 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2586 GRC_LCLCTRL_GPIO_OE1 |
2587 GRC_LCLCTRL_GPIO_OE2 |
2588 GRC_LCLCTRL_GPIO_OUTPUT0 |
2589 GRC_LCLCTRL_GPIO_OUTPUT1 |
2591 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2592 TG3_GRC_LCLCTL_PWRSW_DELAY);
2594 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2595 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2596 TG3_GRC_LCLCTL_PWRSW_DELAY);
2598 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2599 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2600 TG3_GRC_LCLCTL_PWRSW_DELAY);
2603 u32 grc_local_ctrl = 0;
2605 /* Workaround to prevent overdrawing Amps. */
2606 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2607 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2608 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2610 TG3_GRC_LCLCTL_PWRSW_DELAY);
2613 /* On 5753 and variants, GPIO2 cannot be used. */
2614 no_gpio2 = tp->nic_sram_data_cfg &
2615 NIC_SRAM_DATA_CFG_NO_GPIO2;
2617 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2618 GRC_LCLCTRL_GPIO_OE1 |
2619 GRC_LCLCTRL_GPIO_OE2 |
2620 GRC_LCLCTRL_GPIO_OUTPUT1 |
2621 GRC_LCLCTRL_GPIO_OUTPUT2;
2623 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2624 GRC_LCLCTRL_GPIO_OUTPUT2);
2626 tw32_wait_f(GRC_LOCAL_CTRL,
2627 tp->grc_local_ctrl | grc_local_ctrl,
2628 TG3_GRC_LCLCTL_PWRSW_DELAY);
2630 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2632 tw32_wait_f(GRC_LOCAL_CTRL,
2633 tp->grc_local_ctrl | grc_local_ctrl,
2634 TG3_GRC_LCLCTL_PWRSW_DELAY);
2637 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2638 tw32_wait_f(GRC_LOCAL_CTRL,
2639 tp->grc_local_ctrl | grc_local_ctrl,
2640 TG3_GRC_LCLCTL_PWRSW_DELAY);
2645 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2649 /* Serialize power state transitions */
2650 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2653 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2654 msg = TG3_GPIO_MSG_NEED_VAUX;
2656 msg = tg3_set_function_status(tp, msg);
2658 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2661 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2662 tg3_pwrsrc_switch_to_vaux(tp);
2664 tg3_pwrsrc_die_with_vmain(tp);
2667 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2670 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2672 bool need_vaux = false;
2674 /* The GPIOs do something completely different on 57765. */
2675 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2678 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2679 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2680 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2681 tg3_frob_aux_power_5717(tp, include_wol ?
2682 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2686 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2687 struct net_device *dev_peer;
2689 dev_peer = pci_get_drvdata(tp->pdev_peer);
2691 /* remove_one() may have been run on the peer. */
2693 struct tg3 *tp_peer = netdev_priv(dev_peer);
2695 if (tg3_flag(tp_peer, INIT_COMPLETE))
2698 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2699 tg3_flag(tp_peer, ENABLE_ASF))
2704 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2705 tg3_flag(tp, ENABLE_ASF))
2709 tg3_pwrsrc_switch_to_vaux(tp);
2711 tg3_pwrsrc_die_with_vmain(tp);
2714 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2716 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2718 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2719 if (speed != SPEED_10)
2721 } else if (speed == SPEED_10)
2727 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2731 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2732 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2733 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2734 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2737 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2738 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2739 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2744 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2746 val = tr32(GRC_MISC_CFG);
2747 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2750 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2752 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2755 tg3_writephy(tp, MII_ADVERTISE, 0);
2756 tg3_writephy(tp, MII_BMCR,
2757 BMCR_ANENABLE | BMCR_ANRESTART);
2759 tg3_writephy(tp, MII_TG3_FET_TEST,
2760 phytest | MII_TG3_FET_SHADOW_EN);
2761 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2762 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2764 MII_TG3_FET_SHDW_AUXMODE4,
2767 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2770 } else if (do_low_power) {
2771 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2772 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2774 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2775 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2776 MII_TG3_AUXCTL_PCTL_VREG_11V;
2777 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2780 /* The PHY should not be powered down on some chips because
2783 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2784 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2785 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2786 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2787 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2791 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2792 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2793 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2794 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2795 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2796 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2799 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2802 /* tp->lock is held. */
2803 static int tg3_nvram_lock(struct tg3 *tp)
2805 if (tg3_flag(tp, NVRAM)) {
2808 if (tp->nvram_lock_cnt == 0) {
2809 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2810 for (i = 0; i < 8000; i++) {
2811 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2816 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2820 tp->nvram_lock_cnt++;
2825 /* tp->lock is held. */
2826 static void tg3_nvram_unlock(struct tg3 *tp)
2828 if (tg3_flag(tp, NVRAM)) {
2829 if (tp->nvram_lock_cnt > 0)
2830 tp->nvram_lock_cnt--;
2831 if (tp->nvram_lock_cnt == 0)
2832 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2836 /* tp->lock is held. */
2837 static void tg3_enable_nvram_access(struct tg3 *tp)
2839 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2840 u32 nvaccess = tr32(NVRAM_ACCESS);
2842 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2846 /* tp->lock is held. */
2847 static void tg3_disable_nvram_access(struct tg3 *tp)
2849 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2850 u32 nvaccess = tr32(NVRAM_ACCESS);
2852 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2856 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2857 u32 offset, u32 *val)
2862 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2865 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2866 EEPROM_ADDR_DEVID_MASK |
2868 tw32(GRC_EEPROM_ADDR,
2870 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2871 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2872 EEPROM_ADDR_ADDR_MASK) |
2873 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2875 for (i = 0; i < 1000; i++) {
2876 tmp = tr32(GRC_EEPROM_ADDR);
2878 if (tmp & EEPROM_ADDR_COMPLETE)
2882 if (!(tmp & EEPROM_ADDR_COMPLETE))
2885 tmp = tr32(GRC_EEPROM_DATA);
2888 * The data will always be opposite the native endian
2889 * format. Perform a blind byteswap to compensate.
2896 #define NVRAM_CMD_TIMEOUT 10000
2898 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2902 tw32(NVRAM_CMD, nvram_cmd);
2903 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2905 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2911 if (i == NVRAM_CMD_TIMEOUT)
2917 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2919 if (tg3_flag(tp, NVRAM) &&
2920 tg3_flag(tp, NVRAM_BUFFERED) &&
2921 tg3_flag(tp, FLASH) &&
2922 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2923 (tp->nvram_jedecnum == JEDEC_ATMEL))
2925 addr = ((addr / tp->nvram_pagesize) <<
2926 ATMEL_AT45DB0X1B_PAGE_POS) +
2927 (addr % tp->nvram_pagesize);
2932 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2934 if (tg3_flag(tp, NVRAM) &&
2935 tg3_flag(tp, NVRAM_BUFFERED) &&
2936 tg3_flag(tp, FLASH) &&
2937 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2938 (tp->nvram_jedecnum == JEDEC_ATMEL))
2940 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2941 tp->nvram_pagesize) +
2942 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2947 /* NOTE: Data read in from NVRAM is byteswapped according to
2948 * the byteswapping settings for all other register accesses.
2949 * tg3 devices are BE devices, so on a BE machine, the data
2950 * returned will be exactly as it is seen in NVRAM. On a LE
2951 * machine, the 32-bit value will be byteswapped.
2953 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2957 if (!tg3_flag(tp, NVRAM))
2958 return tg3_nvram_read_using_eeprom(tp, offset, val);
2960 offset = tg3_nvram_phys_addr(tp, offset);
2962 if (offset > NVRAM_ADDR_MSK)
2965 ret = tg3_nvram_lock(tp);
2969 tg3_enable_nvram_access(tp);
2971 tw32(NVRAM_ADDR, offset);
2972 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2973 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2976 *val = tr32(NVRAM_RDDATA);
2978 tg3_disable_nvram_access(tp);
2980 tg3_nvram_unlock(tp);
2985 /* Ensures NVRAM data is in bytestream format. */
2986 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2989 int res = tg3_nvram_read(tp, offset, &v);
2991 *val = cpu_to_be32(v);
2995 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2996 u32 offset, u32 len, u8 *buf)
3001 for (i = 0; i < len; i += 4) {
3007 memcpy(&data, buf + i, 4);
3010 * The SEEPROM interface expects the data to always be opposite
3011 * the native endian format. We accomplish this by reversing
3012 * all the operations that would have been performed on the
3013 * data from a call to tg3_nvram_read_be32().
3015 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3017 val = tr32(GRC_EEPROM_ADDR);
3018 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3020 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3022 tw32(GRC_EEPROM_ADDR, val |
3023 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3024 (addr & EEPROM_ADDR_ADDR_MASK) |
3028 for (j = 0; j < 1000; j++) {
3029 val = tr32(GRC_EEPROM_ADDR);
3031 if (val & EEPROM_ADDR_COMPLETE)
3035 if (!(val & EEPROM_ADDR_COMPLETE)) {
3044 /* offset and length are dword aligned */
3045 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3049 u32 pagesize = tp->nvram_pagesize;
3050 u32 pagemask = pagesize - 1;
3054 tmp = kmalloc(pagesize, GFP_KERNEL);
3060 u32 phy_addr, page_off, size;
3062 phy_addr = offset & ~pagemask;
3064 for (j = 0; j < pagesize; j += 4) {
3065 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3066 (__be32 *) (tmp + j));
3073 page_off = offset & pagemask;
3080 memcpy(tmp + page_off, buf, size);
3082 offset = offset + (pagesize - page_off);
3084 tg3_enable_nvram_access(tp);
3087 * Before we can erase the flash page, we need
3088 * to issue a special "write enable" command.
3090 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3092 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3095 /* Erase the target page */
3096 tw32(NVRAM_ADDR, phy_addr);
3098 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3099 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3101 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3104 /* Issue another write enable to start the write. */
3105 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3107 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3110 for (j = 0; j < pagesize; j += 4) {
3113 data = *((__be32 *) (tmp + j));
3115 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3117 tw32(NVRAM_ADDR, phy_addr + j);
3119 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3123 nvram_cmd |= NVRAM_CMD_FIRST;
3124 else if (j == (pagesize - 4))
3125 nvram_cmd |= NVRAM_CMD_LAST;
3127 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3135 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3136 tg3_nvram_exec_cmd(tp, nvram_cmd);
3143 /* offset and length are dword aligned */
3144 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3149 for (i = 0; i < len; i += 4, offset += 4) {
3150 u32 page_off, phy_addr, nvram_cmd;
3153 memcpy(&data, buf + i, 4);
3154 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3156 page_off = offset % tp->nvram_pagesize;
3158 phy_addr = tg3_nvram_phys_addr(tp, offset);
3160 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3162 if (page_off == 0 || i == 0)
3163 nvram_cmd |= NVRAM_CMD_FIRST;
3164 if (page_off == (tp->nvram_pagesize - 4))
3165 nvram_cmd |= NVRAM_CMD_LAST;
3168 nvram_cmd |= NVRAM_CMD_LAST;
3170 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3171 !tg3_flag(tp, FLASH) ||
3172 !tg3_flag(tp, 57765_PLUS))
3173 tw32(NVRAM_ADDR, phy_addr);
3175 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3176 !tg3_flag(tp, 5755_PLUS) &&
3177 (tp->nvram_jedecnum == JEDEC_ST) &&
3178 (nvram_cmd & NVRAM_CMD_FIRST)) {
3181 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3182 ret = tg3_nvram_exec_cmd(tp, cmd);
3186 if (!tg3_flag(tp, FLASH)) {
3187 /* We always do complete word writes to eeprom. */
3188 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3191 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3198 /* offset and length are dword aligned */
3199 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3203 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3204 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3205 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3209 if (!tg3_flag(tp, NVRAM)) {
3210 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3214 ret = tg3_nvram_lock(tp);
3218 tg3_enable_nvram_access(tp);
3219 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3220 tw32(NVRAM_WRITE1, 0x406);
3222 grc_mode = tr32(GRC_MODE);
3223 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3225 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3226 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3229 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3233 grc_mode = tr32(GRC_MODE);
3234 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3236 tg3_disable_nvram_access(tp);
3237 tg3_nvram_unlock(tp);
3240 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3241 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3248 #define RX_CPU_SCRATCH_BASE 0x30000
3249 #define RX_CPU_SCRATCH_SIZE 0x04000
3250 #define TX_CPU_SCRATCH_BASE 0x34000
3251 #define TX_CPU_SCRATCH_SIZE 0x04000
3253 /* tp->lock is held. */
3254 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3258 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3261 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3263 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3266 if (offset == RX_CPU_BASE) {
3267 for (i = 0; i < 10000; i++) {
3268 tw32(offset + CPU_STATE, 0xffffffff);
3269 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3270 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3274 tw32(offset + CPU_STATE, 0xffffffff);
3275 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3278 for (i = 0; i < 10000; i++) {
3279 tw32(offset + CPU_STATE, 0xffffffff);
3280 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3281 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3287 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3288 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3292 /* Clear firmware's nvram arbitration. */
3293 if (tg3_flag(tp, NVRAM))
3294 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3299 unsigned int fw_base;
3300 unsigned int fw_len;
3301 const __be32 *fw_data;
3304 /* tp->lock is held. */
3305 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3306 u32 cpu_scratch_base, int cpu_scratch_size,
3307 struct fw_info *info)
3309 int err, lock_err, i;
3310 void (*write_op)(struct tg3 *, u32, u32);
3312 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3314 "%s: Trying to load TX cpu firmware which is 5705\n",
3319 if (tg3_flag(tp, 5705_PLUS))
3320 write_op = tg3_write_mem;
3322 write_op = tg3_write_indirect_reg32;
3324 /* It is possible that bootcode is still loading at this point.
3325 * Get the nvram lock first before halting the cpu.
3327 lock_err = tg3_nvram_lock(tp);
3328 err = tg3_halt_cpu(tp, cpu_base);
3330 tg3_nvram_unlock(tp);
3334 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3335 write_op(tp, cpu_scratch_base + i, 0);
3336 tw32(cpu_base + CPU_STATE, 0xffffffff);
3337 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3338 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3339 write_op(tp, (cpu_scratch_base +
3340 (info->fw_base & 0xffff) +
3342 be32_to_cpu(info->fw_data[i]));
3350 /* tp->lock is held. */
3351 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3353 struct fw_info info;
3354 const __be32 *fw_data;
3357 fw_data = (void *)tp->fw->data;
3359 /* Firmware blob starts with version numbers, followed by
3360 start address and length. We are setting complete length.
3361 length = end_address_of_bss - start_address_of_text.
3362 Remainder is the blob to be loaded contiguously
3363 from start address. */
3365 info.fw_base = be32_to_cpu(fw_data[1]);
3366 info.fw_len = tp->fw->size - 12;
3367 info.fw_data = &fw_data[3];
3369 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3370 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3375 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3376 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3381 /* Now startup only the RX cpu. */
3382 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3383 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3385 for (i = 0; i < 5; i++) {
3386 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3388 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3389 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3390 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3394 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3395 "should be %08x\n", __func__,
3396 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3399 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3400 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3405 /* tp->lock is held. */
3406 static int tg3_load_tso_firmware(struct tg3 *tp)
3408 struct fw_info info;
3409 const __be32 *fw_data;
3410 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3413 if (tg3_flag(tp, HW_TSO_1) ||
3414 tg3_flag(tp, HW_TSO_2) ||
3415 tg3_flag(tp, HW_TSO_3))
3418 fw_data = (void *)tp->fw->data;
3420 /* Firmware blob starts with version numbers, followed by
3421 start address and length. We are setting complete length.
3422 length = end_address_of_bss - start_address_of_text.
3423 Remainder is the blob to be loaded contiguously
3424 from start address. */
3426 info.fw_base = be32_to_cpu(fw_data[1]);
3427 cpu_scratch_size = tp->fw_len;
3428 info.fw_len = tp->fw->size - 12;
3429 info.fw_data = &fw_data[3];
3431 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3432 cpu_base = RX_CPU_BASE;
3433 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3435 cpu_base = TX_CPU_BASE;
3436 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3437 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3440 err = tg3_load_firmware_cpu(tp, cpu_base,
3441 cpu_scratch_base, cpu_scratch_size,
3446 /* Now startup the cpu. */
3447 tw32(cpu_base + CPU_STATE, 0xffffffff);
3448 tw32_f(cpu_base + CPU_PC, info.fw_base);
3450 for (i = 0; i < 5; i++) {
3451 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3453 tw32(cpu_base + CPU_STATE, 0xffffffff);
3454 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3455 tw32_f(cpu_base + CPU_PC, info.fw_base);
3460 "%s fails to set CPU PC, is %08x should be %08x\n",
3461 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3464 tw32(cpu_base + CPU_STATE, 0xffffffff);
3465 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3470 /* tp->lock is held. */
3471 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3473 u32 addr_high, addr_low;
3476 addr_high = ((tp->dev->dev_addr[0] << 8) |
3477 tp->dev->dev_addr[1]);
3478 addr_low = ((tp->dev->dev_addr[2] << 24) |
3479 (tp->dev->dev_addr[3] << 16) |
3480 (tp->dev->dev_addr[4] << 8) |
3481 (tp->dev->dev_addr[5] << 0));
3482 for (i = 0; i < 4; i++) {
3483 if (i == 1 && skip_mac_1)
3485 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3486 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3489 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3490 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3491 for (i = 0; i < 12; i++) {
3492 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3493 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3497 addr_high = (tp->dev->dev_addr[0] +
3498 tp->dev->dev_addr[1] +
3499 tp->dev->dev_addr[2] +
3500 tp->dev->dev_addr[3] +
3501 tp->dev->dev_addr[4] +
3502 tp->dev->dev_addr[5]) &
3503 TX_BACKOFF_SEED_MASK;
3504 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3507 static void tg3_enable_register_access(struct tg3 *tp)
3510 * Make sure register accesses (indirect or otherwise) will function
3513 pci_write_config_dword(tp->pdev,
3514 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3517 static int tg3_power_up(struct tg3 *tp)
3521 tg3_enable_register_access(tp);
3523 err = pci_set_power_state(tp->pdev, PCI_D0);
3525 /* Switch out of Vaux if it is a NIC */
3526 tg3_pwrsrc_switch_to_vmain(tp);
3528 netdev_err(tp->dev, "Transition to D0 failed\n");
3534 static int tg3_setup_phy(struct tg3 *, int);
3536 static int tg3_power_down_prepare(struct tg3 *tp)
3539 bool device_should_wake, do_low_power;
3541 tg3_enable_register_access(tp);
3543 /* Restore the CLKREQ setting. */
3544 if (tg3_flag(tp, CLKREQ_BUG)) {
3547 pci_read_config_word(tp->pdev,
3548 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3550 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3551 pci_write_config_word(tp->pdev,
3552 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3556 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3557 tw32(TG3PCI_MISC_HOST_CTRL,
3558 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3560 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3561 tg3_flag(tp, WOL_ENABLE);
3563 if (tg3_flag(tp, USE_PHYLIB)) {
3564 do_low_power = false;
3565 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3566 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3567 struct phy_device *phydev;
3568 u32 phyid, advertising;
3570 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3572 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3574 tp->link_config.speed = phydev->speed;
3575 tp->link_config.duplex = phydev->duplex;
3576 tp->link_config.autoneg = phydev->autoneg;
3577 tp->link_config.advertising = phydev->advertising;
3579 advertising = ADVERTISED_TP |
3581 ADVERTISED_Autoneg |
3582 ADVERTISED_10baseT_Half;
3584 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3585 if (tg3_flag(tp, WOL_SPEED_100MB))
3587 ADVERTISED_100baseT_Half |
3588 ADVERTISED_100baseT_Full |
3589 ADVERTISED_10baseT_Full;
3591 advertising |= ADVERTISED_10baseT_Full;
3594 phydev->advertising = advertising;
3596 phy_start_aneg(phydev);
3598 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3599 if (phyid != PHY_ID_BCMAC131) {
3600 phyid &= PHY_BCM_OUI_MASK;
3601 if (phyid == PHY_BCM_OUI_1 ||
3602 phyid == PHY_BCM_OUI_2 ||
3603 phyid == PHY_BCM_OUI_3)
3604 do_low_power = true;
3608 do_low_power = true;
3610 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3611 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3613 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3614 tg3_setup_phy(tp, 0);
3617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3620 val = tr32(GRC_VCPU_EXT_CTRL);
3621 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3622 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3626 for (i = 0; i < 200; i++) {
3627 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3628 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3633 if (tg3_flag(tp, WOL_CAP))
3634 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3635 WOL_DRV_STATE_SHUTDOWN |
3639 if (device_should_wake) {
3642 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3644 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3645 tg3_phy_auxctl_write(tp,
3646 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3647 MII_TG3_AUXCTL_PCTL_WOL_EN |
3648 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3649 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3653 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3654 mac_mode = MAC_MODE_PORT_MODE_GMII;
3656 mac_mode = MAC_MODE_PORT_MODE_MII;
3658 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3659 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3661 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3662 SPEED_100 : SPEED_10;
3663 if (tg3_5700_link_polarity(tp, speed))
3664 mac_mode |= MAC_MODE_LINK_POLARITY;
3666 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3669 mac_mode = MAC_MODE_PORT_MODE_TBI;
3672 if (!tg3_flag(tp, 5750_PLUS))
3673 tw32(MAC_LED_CTRL, tp->led_ctrl);
3675 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3676 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3677 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3678 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3680 if (tg3_flag(tp, ENABLE_APE))
3681 mac_mode |= MAC_MODE_APE_TX_EN |
3682 MAC_MODE_APE_RX_EN |
3683 MAC_MODE_TDE_ENABLE;
3685 tw32_f(MAC_MODE, mac_mode);
3688 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3692 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3693 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3694 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3697 base_val = tp->pci_clock_ctrl;
3698 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3699 CLOCK_CTRL_TXCLK_DISABLE);
3701 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3702 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3703 } else if (tg3_flag(tp, 5780_CLASS) ||
3704 tg3_flag(tp, CPMU_PRESENT) ||
3705 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3707 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3708 u32 newbits1, newbits2;
3710 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3711 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3712 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3713 CLOCK_CTRL_TXCLK_DISABLE |
3715 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3716 } else if (tg3_flag(tp, 5705_PLUS)) {
3717 newbits1 = CLOCK_CTRL_625_CORE;
3718 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3720 newbits1 = CLOCK_CTRL_ALTCLK;
3721 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3724 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3727 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3730 if (!tg3_flag(tp, 5705_PLUS)) {
3733 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3734 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3735 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3736 CLOCK_CTRL_TXCLK_DISABLE |
3737 CLOCK_CTRL_44MHZ_CORE);
3739 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3742 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3743 tp->pci_clock_ctrl | newbits3, 40);
3747 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3748 tg3_power_down_phy(tp, do_low_power);
3750 tg3_frob_aux_power(tp, true);
3752 /* Workaround for unstable PLL clock */
3753 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3754 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3755 u32 val = tr32(0x7d00);
3757 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3759 if (!tg3_flag(tp, ENABLE_ASF)) {
3762 err = tg3_nvram_lock(tp);
3763 tg3_halt_cpu(tp, RX_CPU_BASE);
3765 tg3_nvram_unlock(tp);
3769 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3774 static void tg3_power_down(struct tg3 *tp)
3776 tg3_power_down_prepare(tp);
3778 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3779 pci_set_power_state(tp->pdev, PCI_D3hot);
3782 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3784 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3785 case MII_TG3_AUX_STAT_10HALF:
3787 *duplex = DUPLEX_HALF;
3790 case MII_TG3_AUX_STAT_10FULL:
3792 *duplex = DUPLEX_FULL;
3795 case MII_TG3_AUX_STAT_100HALF:
3797 *duplex = DUPLEX_HALF;
3800 case MII_TG3_AUX_STAT_100FULL:
3802 *duplex = DUPLEX_FULL;
3805 case MII_TG3_AUX_STAT_1000HALF:
3806 *speed = SPEED_1000;
3807 *duplex = DUPLEX_HALF;
3810 case MII_TG3_AUX_STAT_1000FULL:
3811 *speed = SPEED_1000;
3812 *duplex = DUPLEX_FULL;
3816 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3817 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3819 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3823 *speed = SPEED_UNKNOWN;
3824 *duplex = DUPLEX_UNKNOWN;
3829 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3834 new_adv = ADVERTISE_CSMA;
3835 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3836 new_adv |= mii_advertise_flowctrl(flowctrl);
3838 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3842 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3843 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3845 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3846 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3847 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3849 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3854 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3857 tw32(TG3_CPMU_EEE_MODE,
3858 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3860 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3865 /* Advertise 100-BaseTX EEE ability */
3866 if (advertise & ADVERTISED_100baseT_Full)
3867 val |= MDIO_AN_EEE_ADV_100TX;
3868 /* Advertise 1000-BaseT EEE ability */
3869 if (advertise & ADVERTISED_1000baseT_Full)
3870 val |= MDIO_AN_EEE_ADV_1000T;
3871 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3875 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3877 case ASIC_REV_57765:
3878 case ASIC_REV_57766:
3880 /* If we advertised any eee advertisements above... */
3882 val = MII_TG3_DSP_TAP26_ALNOKO |
3883 MII_TG3_DSP_TAP26_RMRXSTO |
3884 MII_TG3_DSP_TAP26_OPCSINPT;
3885 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3888 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3889 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3890 MII_TG3_DSP_CH34TP2_HIBW01);
3893 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3902 static void tg3_phy_copper_begin(struct tg3 *tp)
3904 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3905 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3908 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3909 adv = ADVERTISED_10baseT_Half |
3910 ADVERTISED_10baseT_Full;
3911 if (tg3_flag(tp, WOL_SPEED_100MB))
3912 adv |= ADVERTISED_100baseT_Half |
3913 ADVERTISED_100baseT_Full;
3915 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3917 adv = tp->link_config.advertising;
3918 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3919 adv &= ~(ADVERTISED_1000baseT_Half |
3920 ADVERTISED_1000baseT_Full);
3922 fc = tp->link_config.flowctrl;
3925 tg3_phy_autoneg_cfg(tp, adv, fc);
3927 tg3_writephy(tp, MII_BMCR,
3928 BMCR_ANENABLE | BMCR_ANRESTART);
3931 u32 bmcr, orig_bmcr;
3933 tp->link_config.active_speed = tp->link_config.speed;
3934 tp->link_config.active_duplex = tp->link_config.duplex;
3937 switch (tp->link_config.speed) {
3943 bmcr |= BMCR_SPEED100;
3947 bmcr |= BMCR_SPEED1000;
3951 if (tp->link_config.duplex == DUPLEX_FULL)
3952 bmcr |= BMCR_FULLDPLX;
3954 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3955 (bmcr != orig_bmcr)) {
3956 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3957 for (i = 0; i < 1500; i++) {
3961 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3962 tg3_readphy(tp, MII_BMSR, &tmp))
3964 if (!(tmp & BMSR_LSTATUS)) {
3969 tg3_writephy(tp, MII_BMCR, bmcr);
3975 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3979 /* Turn off tap power management. */
3980 /* Set Extended packet length bit */
3981 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3983 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3984 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3985 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3986 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3987 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3994 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3996 u32 advmsk, tgtadv, advertising;
3998 advertising = tp->link_config.advertising;
3999 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4001 advmsk = ADVERTISE_ALL;
4002 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4003 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4004 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4007 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4010 if ((*lcladv & advmsk) != tgtadv)
4013 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4016 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4018 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4022 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4023 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4024 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4025 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4026 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4028 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4031 if (tg3_ctrl != tgtadv)
4038 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4042 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4045 if (tg3_readphy(tp, MII_STAT1000, &val))
4048 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4051 if (tg3_readphy(tp, MII_LPA, rmtadv))
4054 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4055 tp->link_config.rmt_adv = lpeth;
4060 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4062 int current_link_up;
4064 u32 lcl_adv, rmt_adv;
4072 (MAC_STATUS_SYNC_CHANGED |
4073 MAC_STATUS_CFG_CHANGED |
4074 MAC_STATUS_MI_COMPLETION |
4075 MAC_STATUS_LNKSTATE_CHANGED));
4078 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4080 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4084 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4086 /* Some third-party PHYs need to be reset on link going
4089 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4090 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4091 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4092 netif_carrier_ok(tp->dev)) {
4093 tg3_readphy(tp, MII_BMSR, &bmsr);
4094 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4095 !(bmsr & BMSR_LSTATUS))
4101 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4102 tg3_readphy(tp, MII_BMSR, &bmsr);
4103 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4104 !tg3_flag(tp, INIT_COMPLETE))
4107 if (!(bmsr & BMSR_LSTATUS)) {
4108 err = tg3_init_5401phy_dsp(tp);
4112 tg3_readphy(tp, MII_BMSR, &bmsr);
4113 for (i = 0; i < 1000; i++) {
4115 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4116 (bmsr & BMSR_LSTATUS)) {
4122 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4123 TG3_PHY_REV_BCM5401_B0 &&
4124 !(bmsr & BMSR_LSTATUS) &&
4125 tp->link_config.active_speed == SPEED_1000) {
4126 err = tg3_phy_reset(tp);
4128 err = tg3_init_5401phy_dsp(tp);
4133 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4134 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4135 /* 5701 {A0,B0} CRC bug workaround */
4136 tg3_writephy(tp, 0x15, 0x0a75);
4137 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4138 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4139 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4142 /* Clear pending interrupts... */
4143 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4144 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4146 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4147 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4148 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4149 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4151 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4152 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4153 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4154 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4155 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4157 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4160 current_link_up = 0;
4161 current_speed = SPEED_UNKNOWN;
4162 current_duplex = DUPLEX_UNKNOWN;
4163 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4164 tp->link_config.rmt_adv = 0;
4166 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4167 err = tg3_phy_auxctl_read(tp,
4168 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4170 if (!err && !(val & (1 << 10))) {
4171 tg3_phy_auxctl_write(tp,
4172 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4179 for (i = 0; i < 100; i++) {
4180 tg3_readphy(tp, MII_BMSR, &bmsr);
4181 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4182 (bmsr & BMSR_LSTATUS))
4187 if (bmsr & BMSR_LSTATUS) {
4190 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4191 for (i = 0; i < 2000; i++) {
4193 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4198 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4203 for (i = 0; i < 200; i++) {
4204 tg3_readphy(tp, MII_BMCR, &bmcr);
4205 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4207 if (bmcr && bmcr != 0x7fff)
4215 tp->link_config.active_speed = current_speed;
4216 tp->link_config.active_duplex = current_duplex;
4218 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4219 if ((bmcr & BMCR_ANENABLE) &&
4220 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4221 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4222 current_link_up = 1;
4224 if (!(bmcr & BMCR_ANENABLE) &&
4225 tp->link_config.speed == current_speed &&
4226 tp->link_config.duplex == current_duplex &&
4227 tp->link_config.flowctrl ==
4228 tp->link_config.active_flowctrl) {
4229 current_link_up = 1;
4233 if (current_link_up == 1 &&
4234 tp->link_config.active_duplex == DUPLEX_FULL) {
4237 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4238 reg = MII_TG3_FET_GEN_STAT;
4239 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4241 reg = MII_TG3_EXT_STAT;
4242 bit = MII_TG3_EXT_STAT_MDIX;
4245 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4246 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4248 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4253 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4254 tg3_phy_copper_begin(tp);
4256 tg3_readphy(tp, MII_BMSR, &bmsr);
4257 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4258 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4259 current_link_up = 1;
4262 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4263 if (current_link_up == 1) {
4264 if (tp->link_config.active_speed == SPEED_100 ||
4265 tp->link_config.active_speed == SPEED_10)
4266 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4268 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4269 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4270 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4272 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4274 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4275 if (tp->link_config.active_duplex == DUPLEX_HALF)
4276 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4278 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4279 if (current_link_up == 1 &&
4280 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4281 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4283 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4286 /* ??? Without this setting Netgear GA302T PHY does not
4287 * ??? send/receive packets...
4289 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4290 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4291 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4292 tw32_f(MAC_MI_MODE, tp->mi_mode);
4296 tw32_f(MAC_MODE, tp->mac_mode);
4299 tg3_phy_eee_adjust(tp, current_link_up);
4301 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4302 /* Polled via timer. */
4303 tw32_f(MAC_EVENT, 0);
4305 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4309 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4310 current_link_up == 1 &&
4311 tp->link_config.active_speed == SPEED_1000 &&
4312 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4315 (MAC_STATUS_SYNC_CHANGED |
4316 MAC_STATUS_CFG_CHANGED));
4319 NIC_SRAM_FIRMWARE_MBOX,
4320 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4323 /* Prevent send BD corruption. */
4324 if (tg3_flag(tp, CLKREQ_BUG)) {
4325 u16 oldlnkctl, newlnkctl;
4327 pci_read_config_word(tp->pdev,
4328 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4330 if (tp->link_config.active_speed == SPEED_100 ||
4331 tp->link_config.active_speed == SPEED_10)
4332 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4334 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4335 if (newlnkctl != oldlnkctl)
4336 pci_write_config_word(tp->pdev,
4337 pci_pcie_cap(tp->pdev) +
4338 PCI_EXP_LNKCTL, newlnkctl);
4341 if (current_link_up != netif_carrier_ok(tp->dev)) {
4342 if (current_link_up)
4343 netif_carrier_on(tp->dev);
4345 netif_carrier_off(tp->dev);
4346 tg3_link_report(tp);
4352 struct tg3_fiber_aneginfo {
4354 #define ANEG_STATE_UNKNOWN 0
4355 #define ANEG_STATE_AN_ENABLE 1
4356 #define ANEG_STATE_RESTART_INIT 2
4357 #define ANEG_STATE_RESTART 3
4358 #define ANEG_STATE_DISABLE_LINK_OK 4
4359 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4360 #define ANEG_STATE_ABILITY_DETECT 6
4361 #define ANEG_STATE_ACK_DETECT_INIT 7
4362 #define ANEG_STATE_ACK_DETECT 8
4363 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4364 #define ANEG_STATE_COMPLETE_ACK 10
4365 #define ANEG_STATE_IDLE_DETECT_INIT 11
4366 #define ANEG_STATE_IDLE_DETECT 12
4367 #define ANEG_STATE_LINK_OK 13
4368 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4369 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4372 #define MR_AN_ENABLE 0x00000001
4373 #define MR_RESTART_AN 0x00000002
4374 #define MR_AN_COMPLETE 0x00000004
4375 #define MR_PAGE_RX 0x00000008
4376 #define MR_NP_LOADED 0x00000010
4377 #define MR_TOGGLE_TX 0x00000020
4378 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4379 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4380 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4381 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4382 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4383 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4384 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4385 #define MR_TOGGLE_RX 0x00002000
4386 #define MR_NP_RX 0x00004000
4388 #define MR_LINK_OK 0x80000000
4390 unsigned long link_time, cur_time;
4392 u32 ability_match_cfg;
4393 int ability_match_count;
4395 char ability_match, idle_match, ack_match;
4397 u32 txconfig, rxconfig;
4398 #define ANEG_CFG_NP 0x00000080
4399 #define ANEG_CFG_ACK 0x00000040
4400 #define ANEG_CFG_RF2 0x00000020
4401 #define ANEG_CFG_RF1 0x00000010
4402 #define ANEG_CFG_PS2 0x00000001
4403 #define ANEG_CFG_PS1 0x00008000
4404 #define ANEG_CFG_HD 0x00004000
4405 #define ANEG_CFG_FD 0x00002000
4406 #define ANEG_CFG_INVAL 0x00001f06
4411 #define ANEG_TIMER_ENAB 2
4412 #define ANEG_FAILED -1
4414 #define ANEG_STATE_SETTLE_TIME 10000
4416 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4417 struct tg3_fiber_aneginfo *ap)
4420 unsigned long delta;
4424 if (ap->state == ANEG_STATE_UNKNOWN) {
4428 ap->ability_match_cfg = 0;
4429 ap->ability_match_count = 0;
4430 ap->ability_match = 0;
4436 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4437 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4439 if (rx_cfg_reg != ap->ability_match_cfg) {
4440 ap->ability_match_cfg = rx_cfg_reg;
4441 ap->ability_match = 0;
4442 ap->ability_match_count = 0;
4444 if (++ap->ability_match_count > 1) {
4445 ap->ability_match = 1;
4446 ap->ability_match_cfg = rx_cfg_reg;
4449 if (rx_cfg_reg & ANEG_CFG_ACK)
4457 ap->ability_match_cfg = 0;
4458 ap->ability_match_count = 0;
4459 ap->ability_match = 0;
4465 ap->rxconfig = rx_cfg_reg;
4468 switch (ap->state) {
4469 case ANEG_STATE_UNKNOWN:
4470 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4471 ap->state = ANEG_STATE_AN_ENABLE;
4474 case ANEG_STATE_AN_ENABLE:
4475 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4476 if (ap->flags & MR_AN_ENABLE) {
4479 ap->ability_match_cfg = 0;
4480 ap->ability_match_count = 0;
4481 ap->ability_match = 0;
4485 ap->state = ANEG_STATE_RESTART_INIT;
4487 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4491 case ANEG_STATE_RESTART_INIT:
4492 ap->link_time = ap->cur_time;
4493 ap->flags &= ~(MR_NP_LOADED);
4495 tw32(MAC_TX_AUTO_NEG, 0);
4496 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4497 tw32_f(MAC_MODE, tp->mac_mode);
4500 ret = ANEG_TIMER_ENAB;
4501 ap->state = ANEG_STATE_RESTART;
4504 case ANEG_STATE_RESTART:
4505 delta = ap->cur_time - ap->link_time;
4506 if (delta > ANEG_STATE_SETTLE_TIME)
4507 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4509 ret = ANEG_TIMER_ENAB;
4512 case ANEG_STATE_DISABLE_LINK_OK:
4516 case ANEG_STATE_ABILITY_DETECT_INIT:
4517 ap->flags &= ~(MR_TOGGLE_TX);
4518 ap->txconfig = ANEG_CFG_FD;
4519 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4520 if (flowctrl & ADVERTISE_1000XPAUSE)
4521 ap->txconfig |= ANEG_CFG_PS1;
4522 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4523 ap->txconfig |= ANEG_CFG_PS2;
4524 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4525 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4526 tw32_f(MAC_MODE, tp->mac_mode);
4529 ap->state = ANEG_STATE_ABILITY_DETECT;
4532 case ANEG_STATE_ABILITY_DETECT:
4533 if (ap->ability_match != 0 && ap->rxconfig != 0)
4534 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4537 case ANEG_STATE_ACK_DETECT_INIT:
4538 ap->txconfig |= ANEG_CFG_ACK;
4539 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4540 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4541 tw32_f(MAC_MODE, tp->mac_mode);
4544 ap->state = ANEG_STATE_ACK_DETECT;
4547 case ANEG_STATE_ACK_DETECT:
4548 if (ap->ack_match != 0) {
4549 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4550 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4551 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4553 ap->state = ANEG_STATE_AN_ENABLE;
4555 } else if (ap->ability_match != 0 &&
4556 ap->rxconfig == 0) {
4557 ap->state = ANEG_STATE_AN_ENABLE;
4561 case ANEG_STATE_COMPLETE_ACK_INIT:
4562 if (ap->rxconfig & ANEG_CFG_INVAL) {
4566 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4567 MR_LP_ADV_HALF_DUPLEX |
4568 MR_LP_ADV_SYM_PAUSE |
4569 MR_LP_ADV_ASYM_PAUSE |
4570 MR_LP_ADV_REMOTE_FAULT1 |
4571 MR_LP_ADV_REMOTE_FAULT2 |
4572 MR_LP_ADV_NEXT_PAGE |
4575 if (ap->rxconfig & ANEG_CFG_FD)
4576 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4577 if (ap->rxconfig & ANEG_CFG_HD)
4578 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4579 if (ap->rxconfig & ANEG_CFG_PS1)
4580 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4581 if (ap->rxconfig & ANEG_CFG_PS2)
4582 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4583 if (ap->rxconfig & ANEG_CFG_RF1)
4584 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4585 if (ap->rxconfig & ANEG_CFG_RF2)
4586 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4587 if (ap->rxconfig & ANEG_CFG_NP)
4588 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4590 ap->link_time = ap->cur_time;
4592 ap->flags ^= (MR_TOGGLE_TX);
4593 if (ap->rxconfig & 0x0008)
4594 ap->flags |= MR_TOGGLE_RX;
4595 if (ap->rxconfig & ANEG_CFG_NP)
4596 ap->flags |= MR_NP_RX;
4597 ap->flags |= MR_PAGE_RX;
4599 ap->state = ANEG_STATE_COMPLETE_ACK;
4600 ret = ANEG_TIMER_ENAB;
4603 case ANEG_STATE_COMPLETE_ACK:
4604 if (ap->ability_match != 0 &&
4605 ap->rxconfig == 0) {
4606 ap->state = ANEG_STATE_AN_ENABLE;
4609 delta = ap->cur_time - ap->link_time;
4610 if (delta > ANEG_STATE_SETTLE_TIME) {
4611 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4612 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4614 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4615 !(ap->flags & MR_NP_RX)) {
4616 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4624 case ANEG_STATE_IDLE_DETECT_INIT:
4625 ap->link_time = ap->cur_time;
4626 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4627 tw32_f(MAC_MODE, tp->mac_mode);
4630 ap->state = ANEG_STATE_IDLE_DETECT;
4631 ret = ANEG_TIMER_ENAB;
4634 case ANEG_STATE_IDLE_DETECT:
4635 if (ap->ability_match != 0 &&
4636 ap->rxconfig == 0) {
4637 ap->state = ANEG_STATE_AN_ENABLE;
4640 delta = ap->cur_time - ap->link_time;
4641 if (delta > ANEG_STATE_SETTLE_TIME) {
4642 /* XXX another gem from the Broadcom driver :( */
4643 ap->state = ANEG_STATE_LINK_OK;
4647 case ANEG_STATE_LINK_OK:
4648 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4652 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4653 /* ??? unimplemented */
4656 case ANEG_STATE_NEXT_PAGE_WAIT:
4657 /* ??? unimplemented */
4668 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4671 struct tg3_fiber_aneginfo aninfo;
4672 int status = ANEG_FAILED;
4676 tw32_f(MAC_TX_AUTO_NEG, 0);
4678 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4679 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4682 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4685 memset(&aninfo, 0, sizeof(aninfo));
4686 aninfo.flags |= MR_AN_ENABLE;
4687 aninfo.state = ANEG_STATE_UNKNOWN;
4688 aninfo.cur_time = 0;
4690 while (++tick < 195000) {
4691 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4692 if (status == ANEG_DONE || status == ANEG_FAILED)
4698 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4699 tw32_f(MAC_MODE, tp->mac_mode);
4702 *txflags = aninfo.txconfig;
4703 *rxflags = aninfo.flags;
4705 if (status == ANEG_DONE &&
4706 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4707 MR_LP_ADV_FULL_DUPLEX)))
4713 static void tg3_init_bcm8002(struct tg3 *tp)
4715 u32 mac_status = tr32(MAC_STATUS);
4718 /* Reset when initting first time or we have a link. */
4719 if (tg3_flag(tp, INIT_COMPLETE) &&
4720 !(mac_status & MAC_STATUS_PCS_SYNCED))
4723 /* Set PLL lock range. */
4724 tg3_writephy(tp, 0x16, 0x8007);
4727 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4729 /* Wait for reset to complete. */
4730 /* XXX schedule_timeout() ... */
4731 for (i = 0; i < 500; i++)
4734 /* Config mode; select PMA/Ch 1 regs. */
4735 tg3_writephy(tp, 0x10, 0x8411);
4737 /* Enable auto-lock and comdet, select txclk for tx. */
4738 tg3_writephy(tp, 0x11, 0x0a10);
4740 tg3_writephy(tp, 0x18, 0x00a0);
4741 tg3_writephy(tp, 0x16, 0x41ff);
4743 /* Assert and deassert POR. */
4744 tg3_writephy(tp, 0x13, 0x0400);
4746 tg3_writephy(tp, 0x13, 0x0000);
4748 tg3_writephy(tp, 0x11, 0x0a50);
4750 tg3_writephy(tp, 0x11, 0x0a10);
4752 /* Wait for signal to stabilize */
4753 /* XXX schedule_timeout() ... */
4754 for (i = 0; i < 15000; i++)
4757 /* Deselect the channel register so we can read the PHYID
4760 tg3_writephy(tp, 0x10, 0x8011);
4763 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4766 u32 sg_dig_ctrl, sg_dig_status;
4767 u32 serdes_cfg, expected_sg_dig_ctrl;
4768 int workaround, port_a;
4769 int current_link_up;
4772 expected_sg_dig_ctrl = 0;
4775 current_link_up = 0;
4777 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4778 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4780 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4783 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4784 /* preserve bits 20-23 for voltage regulator */
4785 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4788 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4790 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4791 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4793 u32 val = serdes_cfg;
4799 tw32_f(MAC_SERDES_CFG, val);
4802 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4804 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4805 tg3_setup_flow_control(tp, 0, 0);
4806 current_link_up = 1;
4811 /* Want auto-negotiation. */
4812 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4814 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4815 if (flowctrl & ADVERTISE_1000XPAUSE)
4816 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4817 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4818 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4820 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4821 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4822 tp->serdes_counter &&
4823 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4824 MAC_STATUS_RCVD_CFG)) ==
4825 MAC_STATUS_PCS_SYNCED)) {
4826 tp->serdes_counter--;
4827 current_link_up = 1;
4832 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4833 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4835 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4837 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4838 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4839 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4840 MAC_STATUS_SIGNAL_DET)) {
4841 sg_dig_status = tr32(SG_DIG_STATUS);
4842 mac_status = tr32(MAC_STATUS);
4844 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4845 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4846 u32 local_adv = 0, remote_adv = 0;
4848 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4849 local_adv |= ADVERTISE_1000XPAUSE;
4850 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4851 local_adv |= ADVERTISE_1000XPSE_ASYM;
4853 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4854 remote_adv |= LPA_1000XPAUSE;
4855 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4856 remote_adv |= LPA_1000XPAUSE_ASYM;
4858 tp->link_config.rmt_adv =
4859 mii_adv_to_ethtool_adv_x(remote_adv);
4861 tg3_setup_flow_control(tp, local_adv, remote_adv);
4862 current_link_up = 1;
4863 tp->serdes_counter = 0;
4864 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4865 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4866 if (tp->serdes_counter)
4867 tp->serdes_counter--;
4870 u32 val = serdes_cfg;
4877 tw32_f(MAC_SERDES_CFG, val);
4880 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4883 /* Link parallel detection - link is up */
4884 /* only if we have PCS_SYNC and not */
4885 /* receiving config code words */
4886 mac_status = tr32(MAC_STATUS);
4887 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4888 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4889 tg3_setup_flow_control(tp, 0, 0);
4890 current_link_up = 1;
4892 TG3_PHYFLG_PARALLEL_DETECT;
4893 tp->serdes_counter =
4894 SERDES_PARALLEL_DET_TIMEOUT;
4896 goto restart_autoneg;
4900 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4901 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4905 return current_link_up;
4908 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4910 int current_link_up = 0;
4912 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4915 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4916 u32 txflags, rxflags;
4919 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4920 u32 local_adv = 0, remote_adv = 0;
4922 if (txflags & ANEG_CFG_PS1)
4923 local_adv |= ADVERTISE_1000XPAUSE;
4924 if (txflags & ANEG_CFG_PS2)
4925 local_adv |= ADVERTISE_1000XPSE_ASYM;
4927 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4928 remote_adv |= LPA_1000XPAUSE;
4929 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4930 remote_adv |= LPA_1000XPAUSE_ASYM;
4932 tp->link_config.rmt_adv =
4933 mii_adv_to_ethtool_adv_x(remote_adv);
4935 tg3_setup_flow_control(tp, local_adv, remote_adv);
4937 current_link_up = 1;
4939 for (i = 0; i < 30; i++) {
4942 (MAC_STATUS_SYNC_CHANGED |
4943 MAC_STATUS_CFG_CHANGED));
4945 if ((tr32(MAC_STATUS) &
4946 (MAC_STATUS_SYNC_CHANGED |
4947 MAC_STATUS_CFG_CHANGED)) == 0)
4951 mac_status = tr32(MAC_STATUS);
4952 if (current_link_up == 0 &&
4953 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4954 !(mac_status & MAC_STATUS_RCVD_CFG))
4955 current_link_up = 1;
4957 tg3_setup_flow_control(tp, 0, 0);
4959 /* Forcing 1000FD link up. */
4960 current_link_up = 1;
4962 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4965 tw32_f(MAC_MODE, tp->mac_mode);
4970 return current_link_up;
4973 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4976 u16 orig_active_speed;
4977 u8 orig_active_duplex;
4979 int current_link_up;
4982 orig_pause_cfg = tp->link_config.active_flowctrl;
4983 orig_active_speed = tp->link_config.active_speed;
4984 orig_active_duplex = tp->link_config.active_duplex;
4986 if (!tg3_flag(tp, HW_AUTONEG) &&
4987 netif_carrier_ok(tp->dev) &&
4988 tg3_flag(tp, INIT_COMPLETE)) {
4989 mac_status = tr32(MAC_STATUS);
4990 mac_status &= (MAC_STATUS_PCS_SYNCED |
4991 MAC_STATUS_SIGNAL_DET |
4992 MAC_STATUS_CFG_CHANGED |
4993 MAC_STATUS_RCVD_CFG);
4994 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4995 MAC_STATUS_SIGNAL_DET)) {
4996 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4997 MAC_STATUS_CFG_CHANGED));
5002 tw32_f(MAC_TX_AUTO_NEG, 0);
5004 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5005 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5006 tw32_f(MAC_MODE, tp->mac_mode);
5009 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5010 tg3_init_bcm8002(tp);
5012 /* Enable link change event even when serdes polling. */
5013 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5016 current_link_up = 0;
5017 tp->link_config.rmt_adv = 0;
5018 mac_status = tr32(MAC_STATUS);
5020 if (tg3_flag(tp, HW_AUTONEG))
5021 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5023 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5025 tp->napi[0].hw_status->status =
5026 (SD_STATUS_UPDATED |
5027 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5029 for (i = 0; i < 100; i++) {
5030 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5031 MAC_STATUS_CFG_CHANGED));
5033 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5034 MAC_STATUS_CFG_CHANGED |
5035 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5039 mac_status = tr32(MAC_STATUS);
5040 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5041 current_link_up = 0;
5042 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5043 tp->serdes_counter == 0) {
5044 tw32_f(MAC_MODE, (tp->mac_mode |
5045 MAC_MODE_SEND_CONFIGS));
5047 tw32_f(MAC_MODE, tp->mac_mode);
5051 if (current_link_up == 1) {
5052 tp->link_config.active_speed = SPEED_1000;
5053 tp->link_config.active_duplex = DUPLEX_FULL;
5054 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5055 LED_CTRL_LNKLED_OVERRIDE |
5056 LED_CTRL_1000MBPS_ON));
5058 tp->link_config.active_speed = SPEED_UNKNOWN;
5059 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5060 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5061 LED_CTRL_LNKLED_OVERRIDE |
5062 LED_CTRL_TRAFFIC_OVERRIDE));
5065 if (current_link_up != netif_carrier_ok(tp->dev)) {
5066 if (current_link_up)
5067 netif_carrier_on(tp->dev);
5069 netif_carrier_off(tp->dev);
5070 tg3_link_report(tp);
5072 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5073 if (orig_pause_cfg != now_pause_cfg ||
5074 orig_active_speed != tp->link_config.active_speed ||
5075 orig_active_duplex != tp->link_config.active_duplex)
5076 tg3_link_report(tp);
5082 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5084 int current_link_up, err = 0;
5088 u32 local_adv, remote_adv;
5090 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5091 tw32_f(MAC_MODE, tp->mac_mode);
5097 (MAC_STATUS_SYNC_CHANGED |
5098 MAC_STATUS_CFG_CHANGED |
5099 MAC_STATUS_MI_COMPLETION |
5100 MAC_STATUS_LNKSTATE_CHANGED));
5106 current_link_up = 0;
5107 current_speed = SPEED_UNKNOWN;
5108 current_duplex = DUPLEX_UNKNOWN;
5109 tp->link_config.rmt_adv = 0;
5111 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5112 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5113 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5114 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5115 bmsr |= BMSR_LSTATUS;
5117 bmsr &= ~BMSR_LSTATUS;
5120 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5122 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5123 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5124 /* do nothing, just check for link up at the end */
5125 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5128 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5129 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5130 ADVERTISE_1000XPAUSE |
5131 ADVERTISE_1000XPSE_ASYM |
5134 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5135 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5137 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5138 tg3_writephy(tp, MII_ADVERTISE, newadv);
5139 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5140 tg3_writephy(tp, MII_BMCR, bmcr);
5142 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5143 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5144 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5151 bmcr &= ~BMCR_SPEED1000;
5152 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5154 if (tp->link_config.duplex == DUPLEX_FULL)
5155 new_bmcr |= BMCR_FULLDPLX;
5157 if (new_bmcr != bmcr) {
5158 /* BMCR_SPEED1000 is a reserved bit that needs
5159 * to be set on write.
5161 new_bmcr |= BMCR_SPEED1000;
5163 /* Force a linkdown */
5164 if (netif_carrier_ok(tp->dev)) {
5167 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5168 adv &= ~(ADVERTISE_1000XFULL |
5169 ADVERTISE_1000XHALF |
5171 tg3_writephy(tp, MII_ADVERTISE, adv);
5172 tg3_writephy(tp, MII_BMCR, bmcr |
5176 netif_carrier_off(tp->dev);
5178 tg3_writephy(tp, MII_BMCR, new_bmcr);
5180 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5181 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5182 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5184 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5185 bmsr |= BMSR_LSTATUS;
5187 bmsr &= ~BMSR_LSTATUS;
5189 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5193 if (bmsr & BMSR_LSTATUS) {
5194 current_speed = SPEED_1000;
5195 current_link_up = 1;
5196 if (bmcr & BMCR_FULLDPLX)
5197 current_duplex = DUPLEX_FULL;
5199 current_duplex = DUPLEX_HALF;
5204 if (bmcr & BMCR_ANENABLE) {
5207 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5208 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5209 common = local_adv & remote_adv;
5210 if (common & (ADVERTISE_1000XHALF |
5211 ADVERTISE_1000XFULL)) {
5212 if (common & ADVERTISE_1000XFULL)
5213 current_duplex = DUPLEX_FULL;
5215 current_duplex = DUPLEX_HALF;
5217 tp->link_config.rmt_adv =
5218 mii_adv_to_ethtool_adv_x(remote_adv);
5219 } else if (!tg3_flag(tp, 5780_CLASS)) {
5220 /* Link is up via parallel detect */
5222 current_link_up = 0;
5227 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5228 tg3_setup_flow_control(tp, local_adv, remote_adv);
5230 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5231 if (tp->link_config.active_duplex == DUPLEX_HALF)
5232 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5234 tw32_f(MAC_MODE, tp->mac_mode);
5237 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5239 tp->link_config.active_speed = current_speed;
5240 tp->link_config.active_duplex = current_duplex;
5242 if (current_link_up != netif_carrier_ok(tp->dev)) {
5243 if (current_link_up)
5244 netif_carrier_on(tp->dev);
5246 netif_carrier_off(tp->dev);
5247 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5249 tg3_link_report(tp);
5254 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5256 if (tp->serdes_counter) {
5257 /* Give autoneg time to complete. */
5258 tp->serdes_counter--;
5262 if (!netif_carrier_ok(tp->dev) &&
5263 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5266 tg3_readphy(tp, MII_BMCR, &bmcr);
5267 if (bmcr & BMCR_ANENABLE) {
5270 /* Select shadow register 0x1f */
5271 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5272 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5274 /* Select expansion interrupt status register */
5275 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5276 MII_TG3_DSP_EXP1_INT_STAT);
5277 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5278 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5280 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5281 /* We have signal detect and not receiving
5282 * config code words, link is up by parallel
5286 bmcr &= ~BMCR_ANENABLE;
5287 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5288 tg3_writephy(tp, MII_BMCR, bmcr);
5289 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5292 } else if (netif_carrier_ok(tp->dev) &&
5293 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5294 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5297 /* Select expansion interrupt status register */
5298 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5299 MII_TG3_DSP_EXP1_INT_STAT);
5300 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5304 /* Config code words received, turn on autoneg. */
5305 tg3_readphy(tp, MII_BMCR, &bmcr);
5306 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5308 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5314 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5319 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5320 err = tg3_setup_fiber_phy(tp, force_reset);
5321 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5322 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5324 err = tg3_setup_copper_phy(tp, force_reset);
5326 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5329 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5330 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5332 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5337 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5338 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5339 tw32(GRC_MISC_CFG, val);
5342 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5343 (6 << TX_LENGTHS_IPG_SHIFT);
5344 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5345 val |= tr32(MAC_TX_LENGTHS) &
5346 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5347 TX_LENGTHS_CNT_DWN_VAL_MSK);
5349 if (tp->link_config.active_speed == SPEED_1000 &&
5350 tp->link_config.active_duplex == DUPLEX_HALF)
5351 tw32(MAC_TX_LENGTHS, val |
5352 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5354 tw32(MAC_TX_LENGTHS, val |
5355 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5357 if (!tg3_flag(tp, 5705_PLUS)) {
5358 if (netif_carrier_ok(tp->dev)) {
5359 tw32(HOSTCC_STAT_COAL_TICKS,
5360 tp->coal.stats_block_coalesce_usecs);
5362 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5366 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5367 val = tr32(PCIE_PWR_MGMT_THRESH);
5368 if (!netif_carrier_ok(tp->dev))
5369 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5372 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5373 tw32(PCIE_PWR_MGMT_THRESH, val);
5379 static inline int tg3_irq_sync(struct tg3 *tp)
5381 return tp->irq_sync;
5384 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5388 dst = (u32 *)((u8 *)dst + off);
5389 for (i = 0; i < len; i += sizeof(u32))
5390 *dst++ = tr32(off + i);
5393 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5395 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5396 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5397 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5398 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5399 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5400 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5401 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5402 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5403 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5404 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5405 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5406 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5407 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5408 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5409 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5410 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5411 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5412 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5413 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5415 if (tg3_flag(tp, SUPPORT_MSIX))
5416 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5418 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5419 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5420 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5421 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5422 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5423 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5424 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5425 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5427 if (!tg3_flag(tp, 5705_PLUS)) {
5428 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5429 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5430 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5433 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5434 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5435 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5436 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5437 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5439 if (tg3_flag(tp, NVRAM))
5440 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5443 static void tg3_dump_state(struct tg3 *tp)
5448 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5450 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5454 if (tg3_flag(tp, PCI_EXPRESS)) {
5455 /* Read up to but not including private PCI registers */
5456 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5457 regs[i / sizeof(u32)] = tr32(i);
5459 tg3_dump_legacy_regs(tp, regs);
5461 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5462 if (!regs[i + 0] && !regs[i + 1] &&
5463 !regs[i + 2] && !regs[i + 3])
5466 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5468 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5473 for (i = 0; i < tp->irq_cnt; i++) {
5474 struct tg3_napi *tnapi = &tp->napi[i];
5476 /* SW status block */
5478 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5480 tnapi->hw_status->status,
5481 tnapi->hw_status->status_tag,
5482 tnapi->hw_status->rx_jumbo_consumer,
5483 tnapi->hw_status->rx_consumer,
5484 tnapi->hw_status->rx_mini_consumer,
5485 tnapi->hw_status->idx[0].rx_producer,
5486 tnapi->hw_status->idx[0].tx_consumer);
5489 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5491 tnapi->last_tag, tnapi->last_irq_tag,
5492 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5494 tnapi->prodring.rx_std_prod_idx,
5495 tnapi->prodring.rx_std_cons_idx,
5496 tnapi->prodring.rx_jmb_prod_idx,
5497 tnapi->prodring.rx_jmb_cons_idx);
5501 /* This is called whenever we suspect that the system chipset is re-
5502 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5503 * is bogus tx completions. We try to recover by setting the
5504 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5507 static void tg3_tx_recover(struct tg3 *tp)
5509 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5510 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5512 netdev_warn(tp->dev,
5513 "The system may be re-ordering memory-mapped I/O "
5514 "cycles to the network device, attempting to recover. "
5515 "Please report the problem to the driver maintainer "
5516 "and include system chipset information.\n");
5518 spin_lock(&tp->lock);
5519 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5520 spin_unlock(&tp->lock);
5523 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5525 /* Tell compiler to fetch tx indices from memory. */
5527 return tnapi->tx_pending -
5528 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5531 /* Tigon3 never reports partial packet sends. So we do not
5532 * need special logic to handle SKBs that have not had all
5533 * of their frags sent yet, like SunGEM does.
5535 static void tg3_tx(struct tg3_napi *tnapi)
5537 struct tg3 *tp = tnapi->tp;
5538 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5539 u32 sw_idx = tnapi->tx_cons;
5540 struct netdev_queue *txq;
5541 int index = tnapi - tp->napi;
5542 unsigned int pkts_compl = 0, bytes_compl = 0;
5544 if (tg3_flag(tp, ENABLE_TSS))
5547 txq = netdev_get_tx_queue(tp->dev, index);
5549 while (sw_idx != hw_idx) {
5550 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5551 struct sk_buff *skb = ri->skb;
5554 if (unlikely(skb == NULL)) {
5559 pci_unmap_single(tp->pdev,
5560 dma_unmap_addr(ri, mapping),
5566 while (ri->fragmented) {
5567 ri->fragmented = false;
5568 sw_idx = NEXT_TX(sw_idx);
5569 ri = &tnapi->tx_buffers[sw_idx];
5572 sw_idx = NEXT_TX(sw_idx);
5574 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5575 ri = &tnapi->tx_buffers[sw_idx];
5576 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5579 pci_unmap_page(tp->pdev,
5580 dma_unmap_addr(ri, mapping),
5581 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5584 while (ri->fragmented) {
5585 ri->fragmented = false;
5586 sw_idx = NEXT_TX(sw_idx);
5587 ri = &tnapi->tx_buffers[sw_idx];
5590 sw_idx = NEXT_TX(sw_idx);
5594 bytes_compl += skb->len;
5598 if (unlikely(tx_bug)) {
5604 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5606 tnapi->tx_cons = sw_idx;
5608 /* Need to make the tx_cons update visible to tg3_start_xmit()
5609 * before checking for netif_queue_stopped(). Without the
5610 * memory barrier, there is a small possibility that tg3_start_xmit()
5611 * will miss it and cause the queue to be stopped forever.
5615 if (unlikely(netif_tx_queue_stopped(txq) &&
5616 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5617 __netif_tx_lock(txq, smp_processor_id());
5618 if (netif_tx_queue_stopped(txq) &&
5619 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5620 netif_tx_wake_queue(txq);
5621 __netif_tx_unlock(txq);
5625 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5630 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5631 map_sz, PCI_DMA_FROMDEVICE);
5636 /* Returns size of skb allocated or < 0 on error.
5638 * We only need to fill in the address because the other members
5639 * of the RX descriptor are invariant, see tg3_init_rings.
5641 * Note the purposeful assymetry of cpu vs. chip accesses. For
5642 * posting buffers we only dirty the first cache line of the RX
5643 * descriptor (containing the address). Whereas for the RX status
5644 * buffers the cpu only reads the last cacheline of the RX descriptor
5645 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5647 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5648 u32 opaque_key, u32 dest_idx_unmasked)
5650 struct tg3_rx_buffer_desc *desc;
5651 struct ring_info *map;
5654 int skb_size, data_size, dest_idx;
5656 switch (opaque_key) {
5657 case RXD_OPAQUE_RING_STD:
5658 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5659 desc = &tpr->rx_std[dest_idx];
5660 map = &tpr->rx_std_buffers[dest_idx];
5661 data_size = tp->rx_pkt_map_sz;
5664 case RXD_OPAQUE_RING_JUMBO:
5665 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5666 desc = &tpr->rx_jmb[dest_idx].std;
5667 map = &tpr->rx_jmb_buffers[dest_idx];
5668 data_size = TG3_RX_JMB_MAP_SZ;
5675 /* Do not overwrite any of the map or rp information
5676 * until we are sure we can commit to a new buffer.
5678 * Callers depend upon this behavior and assume that
5679 * we leave everything unchanged if we fail.
5681 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5682 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5683 data = kmalloc(skb_size, GFP_ATOMIC);
5687 mapping = pci_map_single(tp->pdev,
5688 data + TG3_RX_OFFSET(tp),
5690 PCI_DMA_FROMDEVICE);
5691 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5697 dma_unmap_addr_set(map, mapping, mapping);
5699 desc->addr_hi = ((u64)mapping >> 32);
5700 desc->addr_lo = ((u64)mapping & 0xffffffff);
5705 /* We only need to move over in the address because the other
5706 * members of the RX descriptor are invariant. See notes above
5707 * tg3_alloc_rx_data for full details.
5709 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5710 struct tg3_rx_prodring_set *dpr,
5711 u32 opaque_key, int src_idx,
5712 u32 dest_idx_unmasked)
5714 struct tg3 *tp = tnapi->tp;
5715 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5716 struct ring_info *src_map, *dest_map;
5717 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5720 switch (opaque_key) {
5721 case RXD_OPAQUE_RING_STD:
5722 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5723 dest_desc = &dpr->rx_std[dest_idx];
5724 dest_map = &dpr->rx_std_buffers[dest_idx];
5725 src_desc = &spr->rx_std[src_idx];
5726 src_map = &spr->rx_std_buffers[src_idx];
5729 case RXD_OPAQUE_RING_JUMBO:
5730 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5731 dest_desc = &dpr->rx_jmb[dest_idx].std;
5732 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5733 src_desc = &spr->rx_jmb[src_idx].std;
5734 src_map = &spr->rx_jmb_buffers[src_idx];
5741 dest_map->data = src_map->data;
5742 dma_unmap_addr_set(dest_map, mapping,
5743 dma_unmap_addr(src_map, mapping));
5744 dest_desc->addr_hi = src_desc->addr_hi;
5745 dest_desc->addr_lo = src_desc->addr_lo;
5747 /* Ensure that the update to the skb happens after the physical
5748 * addresses have been transferred to the new BD location.
5752 src_map->data = NULL;
5755 /* The RX ring scheme is composed of multiple rings which post fresh
5756 * buffers to the chip, and one special ring the chip uses to report
5757 * status back to the host.
5759 * The special ring reports the status of received packets to the
5760 * host. The chip does not write into the original descriptor the
5761 * RX buffer was obtained from. The chip simply takes the original
5762 * descriptor as provided by the host, updates the status and length
5763 * field, then writes this into the next status ring entry.
5765 * Each ring the host uses to post buffers to the chip is described
5766 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
5767 * it is first placed into the on-chip ram. When the packet's length
5768 * is known, it walks down the TG3_BDINFO entries to select the ring.
5769 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5770 * which is within the range of the new packet's length is chosen.
5772 * The "separate ring for rx status" scheme may sound queer, but it makes
5773 * sense from a cache coherency perspective. If only the host writes
5774 * to the buffer post rings, and only the chip writes to the rx status
5775 * rings, then cache lines never move beyond shared-modified state.
5776 * If both the host and chip were to write into the same ring, cache line
5777 * eviction could occur since both entities want it in an exclusive state.
5779 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5781 struct tg3 *tp = tnapi->tp;
5782 u32 work_mask, rx_std_posted = 0;
5783 u32 std_prod_idx, jmb_prod_idx;
5784 u32 sw_idx = tnapi->rx_rcb_ptr;
5787 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5789 hw_idx = *(tnapi->rx_rcb_prod_idx);
5791 * We need to order the read of hw_idx and the read of
5792 * the opaque cookie.
5797 std_prod_idx = tpr->rx_std_prod_idx;
5798 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5799 while (sw_idx != hw_idx && budget > 0) {
5800 struct ring_info *ri;
5801 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5803 struct sk_buff *skb;
5804 dma_addr_t dma_addr;
5805 u32 opaque_key, desc_idx, *post_ptr;
5808 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5809 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5810 if (opaque_key == RXD_OPAQUE_RING_STD) {
5811 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5812 dma_addr = dma_unmap_addr(ri, mapping);
5814 post_ptr = &std_prod_idx;
5816 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5817 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5818 dma_addr = dma_unmap_addr(ri, mapping);
5820 post_ptr = &jmb_prod_idx;
5822 goto next_pkt_nopost;
5824 work_mask |= opaque_key;
5826 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5827 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5829 tg3_recycle_rx(tnapi, tpr, opaque_key,
5830 desc_idx, *post_ptr);
5832 /* Other statistics kept track of by card. */
5837 prefetch(data + TG3_RX_OFFSET(tp));
5838 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5841 if (len > TG3_RX_COPY_THRESH(tp)) {
5844 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5849 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5850 PCI_DMA_FROMDEVICE);
5852 skb = build_skb(data);
5855 goto drop_it_no_recycle;
5857 skb_reserve(skb, TG3_RX_OFFSET(tp));
5858 /* Ensure that the update to the data happens
5859 * after the usage of the old DMA mapping.
5866 tg3_recycle_rx(tnapi, tpr, opaque_key,
5867 desc_idx, *post_ptr);
5869 skb = netdev_alloc_skb(tp->dev,
5870 len + TG3_RAW_IP_ALIGN);
5872 goto drop_it_no_recycle;
5874 skb_reserve(skb, TG3_RAW_IP_ALIGN);
5875 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5877 data + TG3_RX_OFFSET(tp),
5879 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5883 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5884 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5885 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5886 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5887 skb->ip_summed = CHECKSUM_UNNECESSARY;
5889 skb_checksum_none_assert(skb);
5891 skb->protocol = eth_type_trans(skb, tp->dev);
5893 if (len > (tp->dev->mtu + ETH_HLEN) &&
5894 skb->protocol != htons(ETH_P_8021Q)) {
5896 goto drop_it_no_recycle;
5899 if (desc->type_flags & RXD_FLAG_VLAN &&
5900 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5901 __vlan_hwaccel_put_tag(skb,
5902 desc->err_vlan & RXD_VLAN_MASK);
5904 napi_gro_receive(&tnapi->napi, skb);
5912 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5913 tpr->rx_std_prod_idx = std_prod_idx &
5914 tp->rx_std_ring_mask;
5915 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5916 tpr->rx_std_prod_idx);
5917 work_mask &= ~RXD_OPAQUE_RING_STD;
5922 sw_idx &= tp->rx_ret_ring_mask;
5924 /* Refresh hw_idx to see if there is new work */
5925 if (sw_idx == hw_idx) {
5926 hw_idx = *(tnapi->rx_rcb_prod_idx);
5931 /* ACK the status ring. */
5932 tnapi->rx_rcb_ptr = sw_idx;
5933 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5935 /* Refill RX ring(s). */
5936 if (!tg3_flag(tp, ENABLE_RSS)) {
5937 /* Sync BD data before updating mailbox */
5940 if (work_mask & RXD_OPAQUE_RING_STD) {
5941 tpr->rx_std_prod_idx = std_prod_idx &
5942 tp->rx_std_ring_mask;
5943 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5944 tpr->rx_std_prod_idx);
5946 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5947 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5948 tp->rx_jmb_ring_mask;
5949 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5950 tpr->rx_jmb_prod_idx);
5953 } else if (work_mask) {
5954 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5955 * updated before the producer indices can be updated.
5959 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5960 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5962 if (tnapi != &tp->napi[1]) {
5963 tp->rx_refill = true;
5964 napi_schedule(&tp->napi[1].napi);
5971 static void tg3_poll_link(struct tg3 *tp)
5973 /* handle link change and other phy events */
5974 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5975 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5977 if (sblk->status & SD_STATUS_LINK_CHG) {
5978 sblk->status = SD_STATUS_UPDATED |
5979 (sblk->status & ~SD_STATUS_LINK_CHG);
5980 spin_lock(&tp->lock);
5981 if (tg3_flag(tp, USE_PHYLIB)) {
5983 (MAC_STATUS_SYNC_CHANGED |
5984 MAC_STATUS_CFG_CHANGED |
5985 MAC_STATUS_MI_COMPLETION |
5986 MAC_STATUS_LNKSTATE_CHANGED));
5989 tg3_setup_phy(tp, 0);
5990 spin_unlock(&tp->lock);
5995 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5996 struct tg3_rx_prodring_set *dpr,
5997 struct tg3_rx_prodring_set *spr)
5999 u32 si, di, cpycnt, src_prod_idx;
6003 src_prod_idx = spr->rx_std_prod_idx;
6005 /* Make sure updates to the rx_std_buffers[] entries and the
6006 * standard producer index are seen in the correct order.
6010 if (spr->rx_std_cons_idx == src_prod_idx)
6013 if (spr->rx_std_cons_idx < src_prod_idx)
6014 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6016 cpycnt = tp->rx_std_ring_mask + 1 -
6017 spr->rx_std_cons_idx;
6019 cpycnt = min(cpycnt,
6020 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6022 si = spr->rx_std_cons_idx;
6023 di = dpr->rx_std_prod_idx;
6025 for (i = di; i < di + cpycnt; i++) {
6026 if (dpr->rx_std_buffers[i].data) {
6036 /* Ensure that updates to the rx_std_buffers ring and the
6037 * shadowed hardware producer ring from tg3_recycle_skb() are
6038 * ordered correctly WRT the skb check above.
6042 memcpy(&dpr->rx_std_buffers[di],
6043 &spr->rx_std_buffers[si],
6044 cpycnt * sizeof(struct ring_info));
6046 for (i = 0; i < cpycnt; i++, di++, si++) {
6047 struct tg3_rx_buffer_desc *sbd, *dbd;
6048 sbd = &spr->rx_std[si];
6049 dbd = &dpr->rx_std[di];
6050 dbd->addr_hi = sbd->addr_hi;
6051 dbd->addr_lo = sbd->addr_lo;
6054 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6055 tp->rx_std_ring_mask;
6056 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6057 tp->rx_std_ring_mask;
6061 src_prod_idx = spr->rx_jmb_prod_idx;
6063 /* Make sure updates to the rx_jmb_buffers[] entries and
6064 * the jumbo producer index are seen in the correct order.
6068 if (spr->rx_jmb_cons_idx == src_prod_idx)
6071 if (spr->rx_jmb_cons_idx < src_prod_idx)
6072 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6074 cpycnt = tp->rx_jmb_ring_mask + 1 -
6075 spr->rx_jmb_cons_idx;
6077 cpycnt = min(cpycnt,
6078 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6080 si = spr->rx_jmb_cons_idx;
6081 di = dpr->rx_jmb_prod_idx;
6083 for (i = di; i < di + cpycnt; i++) {
6084 if (dpr->rx_jmb_buffers[i].data) {
6094 /* Ensure that updates to the rx_jmb_buffers ring and the
6095 * shadowed hardware producer ring from tg3_recycle_skb() are
6096 * ordered correctly WRT the skb check above.
6100 memcpy(&dpr->rx_jmb_buffers[di],
6101 &spr->rx_jmb_buffers[si],
6102 cpycnt * sizeof(struct ring_info));
6104 for (i = 0; i < cpycnt; i++, di++, si++) {
6105 struct tg3_rx_buffer_desc *sbd, *dbd;
6106 sbd = &spr->rx_jmb[si].std;
6107 dbd = &dpr->rx_jmb[di].std;
6108 dbd->addr_hi = sbd->addr_hi;
6109 dbd->addr_lo = sbd->addr_lo;
6112 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6113 tp->rx_jmb_ring_mask;
6114 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6115 tp->rx_jmb_ring_mask;
6121 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6123 struct tg3 *tp = tnapi->tp;
6125 /* run TX completion thread */
6126 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6128 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6132 if (!tnapi->rx_rcb_prod_idx)
6135 /* run RX thread, within the bounds set by NAPI.
6136 * All RX "locking" is done by ensuring outside
6137 * code synchronizes with tg3->napi.poll()
6139 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6140 work_done += tg3_rx(tnapi, budget - work_done);
6142 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6143 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6145 u32 std_prod_idx = dpr->rx_std_prod_idx;
6146 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6148 tp->rx_refill = false;
6149 for (i = 1; i < tp->irq_cnt; i++)
6150 err |= tg3_rx_prodring_xfer(tp, dpr,
6151 &tp->napi[i].prodring);
6155 if (std_prod_idx != dpr->rx_std_prod_idx)
6156 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6157 dpr->rx_std_prod_idx);
6159 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6160 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6161 dpr->rx_jmb_prod_idx);
6166 tw32_f(HOSTCC_MODE, tp->coal_now);
6172 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6174 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6175 schedule_work(&tp->reset_task);
6178 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6180 cancel_work_sync(&tp->reset_task);
6181 tg3_flag_clear(tp, RESET_TASK_PENDING);
6182 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6185 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6187 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6188 struct tg3 *tp = tnapi->tp;
6190 struct tg3_hw_status *sblk = tnapi->hw_status;
6193 work_done = tg3_poll_work(tnapi, work_done, budget);
6195 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6198 if (unlikely(work_done >= budget))
6201 /* tp->last_tag is used in tg3_int_reenable() below
6202 * to tell the hw how much work has been processed,
6203 * so we must read it before checking for more work.
6205 tnapi->last_tag = sblk->status_tag;
6206 tnapi->last_irq_tag = tnapi->last_tag;
6209 /* check for RX/TX work to do */
6210 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6211 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6213 /* This test here is not race free, but will reduce
6214 * the number of interrupts by looping again.
6216 if (tnapi == &tp->napi[1] && tp->rx_refill)
6219 napi_complete(napi);
6220 /* Reenable interrupts. */
6221 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6223 /* This test here is synchronized by napi_schedule()
6224 * and napi_complete() to close the race condition.
6226 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6227 tw32(HOSTCC_MODE, tp->coalesce_mode |
6228 HOSTCC_MODE_ENABLE |
6239 /* work_done is guaranteed to be less than budget. */
6240 napi_complete(napi);
6241 tg3_reset_task_schedule(tp);
6245 static void tg3_process_error(struct tg3 *tp)
6248 bool real_error = false;
6250 if (tg3_flag(tp, ERROR_PROCESSED))
6253 /* Check Flow Attention register */
6254 val = tr32(HOSTCC_FLOW_ATTN);
6255 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6256 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6260 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6261 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6265 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6266 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6275 tg3_flag_set(tp, ERROR_PROCESSED);
6276 tg3_reset_task_schedule(tp);
6279 static int tg3_poll(struct napi_struct *napi, int budget)
6281 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6282 struct tg3 *tp = tnapi->tp;
6284 struct tg3_hw_status *sblk = tnapi->hw_status;
6287 if (sblk->status & SD_STATUS_ERROR)
6288 tg3_process_error(tp);
6292 work_done = tg3_poll_work(tnapi, work_done, budget);
6294 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6297 if (unlikely(work_done >= budget))
6300 if (tg3_flag(tp, TAGGED_STATUS)) {
6301 /* tp->last_tag is used in tg3_int_reenable() below
6302 * to tell the hw how much work has been processed,
6303 * so we must read it before checking for more work.
6305 tnapi->last_tag = sblk->status_tag;
6306 tnapi->last_irq_tag = tnapi->last_tag;
6309 sblk->status &= ~SD_STATUS_UPDATED;
6311 if (likely(!tg3_has_work(tnapi))) {
6312 napi_complete(napi);
6313 tg3_int_reenable(tnapi);
6321 /* work_done is guaranteed to be less than budget. */
6322 napi_complete(napi);
6323 tg3_reset_task_schedule(tp);
6327 static void tg3_napi_disable(struct tg3 *tp)
6331 for (i = tp->irq_cnt - 1; i >= 0; i--)
6332 napi_disable(&tp->napi[i].napi);
6335 static void tg3_napi_enable(struct tg3 *tp)
6339 for (i = 0; i < tp->irq_cnt; i++)
6340 napi_enable(&tp->napi[i].napi);
6343 static void tg3_napi_init(struct tg3 *tp)
6347 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6348 for (i = 1; i < tp->irq_cnt; i++)
6349 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6352 static void tg3_napi_fini(struct tg3 *tp)
6356 for (i = 0; i < tp->irq_cnt; i++)
6357 netif_napi_del(&tp->napi[i].napi);
6360 static inline void tg3_netif_stop(struct tg3 *tp)
6362 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6363 tg3_napi_disable(tp);
6364 netif_tx_disable(tp->dev);
6367 static inline void tg3_netif_start(struct tg3 *tp)
6369 /* NOTE: unconditional netif_tx_wake_all_queues is only
6370 * appropriate so long as all callers are assured to
6371 * have free tx slots (such as after tg3_init_hw)
6373 netif_tx_wake_all_queues(tp->dev);
6375 tg3_napi_enable(tp);
6376 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6377 tg3_enable_ints(tp);
6380 static void tg3_irq_quiesce(struct tg3 *tp)
6384 BUG_ON(tp->irq_sync);
6389 for (i = 0; i < tp->irq_cnt; i++)
6390 synchronize_irq(tp->napi[i].irq_vec);
6393 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6394 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6395 * with as well. Most of the time, this is not necessary except when
6396 * shutting down the device.
6398 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6400 spin_lock_bh(&tp->lock);
6402 tg3_irq_quiesce(tp);
6405 static inline void tg3_full_unlock(struct tg3 *tp)
6407 spin_unlock_bh(&tp->lock);
6410 /* One-shot MSI handler - Chip automatically disables interrupt
6411 * after sending MSI so driver doesn't have to do it.
6413 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6415 struct tg3_napi *tnapi = dev_id;
6416 struct tg3 *tp = tnapi->tp;
6418 prefetch(tnapi->hw_status);
6420 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6422 if (likely(!tg3_irq_sync(tp)))
6423 napi_schedule(&tnapi->napi);
6428 /* MSI ISR - No need to check for interrupt sharing and no need to
6429 * flush status block and interrupt mailbox. PCI ordering rules
6430 * guarantee that MSI will arrive after the status block.
6432 static irqreturn_t tg3_msi(int irq, void *dev_id)
6434 struct tg3_napi *tnapi = dev_id;
6435 struct tg3 *tp = tnapi->tp;
6437 prefetch(tnapi->hw_status);
6439 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6441 * Writing any value to intr-mbox-0 clears PCI INTA# and
6442 * chip-internal interrupt pending events.
6443 * Writing non-zero to intr-mbox-0 additional tells the
6444 * NIC to stop sending us irqs, engaging "in-intr-handler"
6447 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6448 if (likely(!tg3_irq_sync(tp)))
6449 napi_schedule(&tnapi->napi);
6451 return IRQ_RETVAL(1);
6454 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6456 struct tg3_napi *tnapi = dev_id;
6457 struct tg3 *tp = tnapi->tp;
6458 struct tg3_hw_status *sblk = tnapi->hw_status;
6459 unsigned int handled = 1;
6461 /* In INTx mode, it is possible for the interrupt to arrive at
6462 * the CPU before the status block posted prior to the interrupt.
6463 * Reading the PCI State register will confirm whether the
6464 * interrupt is ours and will flush the status block.
6466 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6467 if (tg3_flag(tp, CHIP_RESETTING) ||
6468 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6475 * Writing any value to intr-mbox-0 clears PCI INTA# and
6476 * chip-internal interrupt pending events.
6477 * Writing non-zero to intr-mbox-0 additional tells the
6478 * NIC to stop sending us irqs, engaging "in-intr-handler"
6481 * Flush the mailbox to de-assert the IRQ immediately to prevent
6482 * spurious interrupts. The flush impacts performance but
6483 * excessive spurious interrupts can be worse in some cases.
6485 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6486 if (tg3_irq_sync(tp))
6488 sblk->status &= ~SD_STATUS_UPDATED;
6489 if (likely(tg3_has_work(tnapi))) {
6490 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6491 napi_schedule(&tnapi->napi);
6493 /* No work, shared interrupt perhaps? re-enable
6494 * interrupts, and flush that PCI write
6496 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6500 return IRQ_RETVAL(handled);
6503 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6505 struct tg3_napi *tnapi = dev_id;
6506 struct tg3 *tp = tnapi->tp;
6507 struct tg3_hw_status *sblk = tnapi->hw_status;
6508 unsigned int handled = 1;
6510 /* In INTx mode, it is possible for the interrupt to arrive at
6511 * the CPU before the status block posted prior to the interrupt.
6512 * Reading the PCI State register will confirm whether the
6513 * interrupt is ours and will flush the status block.
6515 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6516 if (tg3_flag(tp, CHIP_RESETTING) ||
6517 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6524 * writing any value to intr-mbox-0 clears PCI INTA# and
6525 * chip-internal interrupt pending events.
6526 * writing non-zero to intr-mbox-0 additional tells the
6527 * NIC to stop sending us irqs, engaging "in-intr-handler"
6530 * Flush the mailbox to de-assert the IRQ immediately to prevent
6531 * spurious interrupts. The flush impacts performance but
6532 * excessive spurious interrupts can be worse in some cases.
6534 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6537 * In a shared interrupt configuration, sometimes other devices'
6538 * interrupts will scream. We record the current status tag here
6539 * so that the above check can report that the screaming interrupts
6540 * are unhandled. Eventually they will be silenced.
6542 tnapi->last_irq_tag = sblk->status_tag;
6544 if (tg3_irq_sync(tp))
6547 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6549 napi_schedule(&tnapi->napi);
6552 return IRQ_RETVAL(handled);
6555 /* ISR for interrupt test */
6556 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6558 struct tg3_napi *tnapi = dev_id;
6559 struct tg3 *tp = tnapi->tp;
6560 struct tg3_hw_status *sblk = tnapi->hw_status;
6562 if ((sblk->status & SD_STATUS_UPDATED) ||
6563 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6564 tg3_disable_ints(tp);
6565 return IRQ_RETVAL(1);
6567 return IRQ_RETVAL(0);
6570 #ifdef CONFIG_NET_POLL_CONTROLLER
6571 static void tg3_poll_controller(struct net_device *dev)
6574 struct tg3 *tp = netdev_priv(dev);
6576 for (i = 0; i < tp->irq_cnt; i++)
6577 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6581 static void tg3_tx_timeout(struct net_device *dev)
6583 struct tg3 *tp = netdev_priv(dev);
6585 if (netif_msg_tx_err(tp)) {
6586 netdev_err(dev, "transmit timed out, resetting\n");
6590 tg3_reset_task_schedule(tp);
6593 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6594 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6596 u32 base = (u32) mapping & 0xffffffff;
6598 return (base > 0xffffdcc0) && (base + len + 8 < base);
6601 /* Test for DMA addresses > 40-bit */
6602 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6605 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6606 if (tg3_flag(tp, 40BIT_DMA_BUG))
6607 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6614 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6615 dma_addr_t mapping, u32 len, u32 flags,
6618 txbd->addr_hi = ((u64) mapping >> 32);
6619 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6620 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6621 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6624 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6625 dma_addr_t map, u32 len, u32 flags,
6628 struct tg3 *tp = tnapi->tp;
6631 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6634 if (tg3_4g_overflow_test(map, len))
6637 if (tg3_40bit_overflow_test(tp, map, len))
6640 if (tp->dma_limit) {
6641 u32 prvidx = *entry;
6642 u32 tmp_flag = flags & ~TXD_FLAG_END;
6643 while (len > tp->dma_limit && *budget) {
6644 u32 frag_len = tp->dma_limit;
6645 len -= tp->dma_limit;
6647 /* Avoid the 8byte DMA problem */
6649 len += tp->dma_limit / 2;
6650 frag_len = tp->dma_limit / 2;
6653 tnapi->tx_buffers[*entry].fragmented = true;
6655 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6656 frag_len, tmp_flag, mss, vlan);
6659 *entry = NEXT_TX(*entry);
6666 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6667 len, flags, mss, vlan);
6669 *entry = NEXT_TX(*entry);
6672 tnapi->tx_buffers[prvidx].fragmented = false;
6676 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6677 len, flags, mss, vlan);
6678 *entry = NEXT_TX(*entry);
6684 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6687 struct sk_buff *skb;
6688 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6693 pci_unmap_single(tnapi->tp->pdev,
6694 dma_unmap_addr(txb, mapping),
6698 while (txb->fragmented) {
6699 txb->fragmented = false;
6700 entry = NEXT_TX(entry);
6701 txb = &tnapi->tx_buffers[entry];
6704 for (i = 0; i <= last; i++) {
6705 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6707 entry = NEXT_TX(entry);
6708 txb = &tnapi->tx_buffers[entry];
6710 pci_unmap_page(tnapi->tp->pdev,
6711 dma_unmap_addr(txb, mapping),
6712 skb_frag_size(frag), PCI_DMA_TODEVICE);
6714 while (txb->fragmented) {
6715 txb->fragmented = false;
6716 entry = NEXT_TX(entry);
6717 txb = &tnapi->tx_buffers[entry];
6722 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6723 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6724 struct sk_buff **pskb,
6725 u32 *entry, u32 *budget,
6726 u32 base_flags, u32 mss, u32 vlan)
6728 struct tg3 *tp = tnapi->tp;
6729 struct sk_buff *new_skb, *skb = *pskb;
6730 dma_addr_t new_addr = 0;
6733 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6734 new_skb = skb_copy(skb, GFP_ATOMIC);
6736 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6738 new_skb = skb_copy_expand(skb,
6739 skb_headroom(skb) + more_headroom,
6740 skb_tailroom(skb), GFP_ATOMIC);
6746 /* New SKB is guaranteed to be linear. */
6747 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6749 /* Make sure the mapping succeeded */
6750 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6751 dev_kfree_skb(new_skb);
6754 u32 save_entry = *entry;
6756 base_flags |= TXD_FLAG_END;
6758 tnapi->tx_buffers[*entry].skb = new_skb;
6759 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6762 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6763 new_skb->len, base_flags,
6765 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6766 dev_kfree_skb(new_skb);
6777 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6779 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6780 * TSO header is greater than 80 bytes.
6782 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6784 struct sk_buff *segs, *nskb;
6785 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6787 /* Estimate the number of fragments in the worst case */
6788 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6789 netif_stop_queue(tp->dev);
6791 /* netif_tx_stop_queue() must be done before checking
6792 * checking tx index in tg3_tx_avail() below, because in
6793 * tg3_tx(), we update tx index before checking for
6794 * netif_tx_queue_stopped().
6797 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6798 return NETDEV_TX_BUSY;
6800 netif_wake_queue(tp->dev);
6803 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6805 goto tg3_tso_bug_end;
6811 tg3_start_xmit(nskb, tp->dev);
6817 return NETDEV_TX_OK;
6820 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6821 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6823 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6825 struct tg3 *tp = netdev_priv(dev);
6826 u32 len, entry, base_flags, mss, vlan = 0;
6828 int i = -1, would_hit_hwbug;
6830 struct tg3_napi *tnapi;
6831 struct netdev_queue *txq;
6834 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6835 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6836 if (tg3_flag(tp, ENABLE_TSS))
6839 budget = tg3_tx_avail(tnapi);
6841 /* We are running in BH disabled context with netif_tx_lock
6842 * and TX reclaim runs via tp->napi.poll inside of a software
6843 * interrupt. Furthermore, IRQ processing runs lockless so we have
6844 * no IRQ context deadlocks to worry about either. Rejoice!
6846 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6847 if (!netif_tx_queue_stopped(txq)) {
6848 netif_tx_stop_queue(txq);
6850 /* This is a hard error, log it. */
6852 "BUG! Tx Ring full when queue awake!\n");
6854 return NETDEV_TX_BUSY;
6857 entry = tnapi->tx_prod;
6859 if (skb->ip_summed == CHECKSUM_PARTIAL)
6860 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6862 mss = skb_shinfo(skb)->gso_size;
6865 u32 tcp_opt_len, hdr_len;
6867 if (skb_header_cloned(skb) &&
6868 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6872 tcp_opt_len = tcp_optlen(skb);
6874 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6876 if (!skb_is_gso_v6(skb)) {
6878 iph->tot_len = htons(mss + hdr_len);
6881 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6882 tg3_flag(tp, TSO_BUG))
6883 return tg3_tso_bug(tp, skb);
6885 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6886 TXD_FLAG_CPU_POST_DMA);
6888 if (tg3_flag(tp, HW_TSO_1) ||
6889 tg3_flag(tp, HW_TSO_2) ||
6890 tg3_flag(tp, HW_TSO_3)) {
6891 tcp_hdr(skb)->check = 0;
6892 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6894 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6899 if (tg3_flag(tp, HW_TSO_3)) {
6900 mss |= (hdr_len & 0xc) << 12;
6902 base_flags |= 0x00000010;
6903 base_flags |= (hdr_len & 0x3e0) << 5;
6904 } else if (tg3_flag(tp, HW_TSO_2))
6905 mss |= hdr_len << 9;
6906 else if (tg3_flag(tp, HW_TSO_1) ||
6907 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6908 if (tcp_opt_len || iph->ihl > 5) {
6911 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6912 mss |= (tsflags << 11);
6915 if (tcp_opt_len || iph->ihl > 5) {
6918 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6919 base_flags |= tsflags << 12;
6924 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6925 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6926 base_flags |= TXD_FLAG_JMB_PKT;
6928 if (vlan_tx_tag_present(skb)) {
6929 base_flags |= TXD_FLAG_VLAN;
6930 vlan = vlan_tx_tag_get(skb);
6933 len = skb_headlen(skb);
6935 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6936 if (pci_dma_mapping_error(tp->pdev, mapping))
6940 tnapi->tx_buffers[entry].skb = skb;
6941 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6943 would_hit_hwbug = 0;
6945 if (tg3_flag(tp, 5701_DMA_BUG))
6946 would_hit_hwbug = 1;
6948 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6949 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6951 would_hit_hwbug = 1;
6952 } else if (skb_shinfo(skb)->nr_frags > 0) {
6955 if (!tg3_flag(tp, HW_TSO_1) &&
6956 !tg3_flag(tp, HW_TSO_2) &&
6957 !tg3_flag(tp, HW_TSO_3))
6960 /* Now loop through additional data
6961 * fragments, and queue them.
6963 last = skb_shinfo(skb)->nr_frags - 1;
6964 for (i = 0; i <= last; i++) {
6965 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6967 len = skb_frag_size(frag);
6968 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6969 len, DMA_TO_DEVICE);
6971 tnapi->tx_buffers[entry].skb = NULL;
6972 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6974 if (dma_mapping_error(&tp->pdev->dev, mapping))
6978 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6980 ((i == last) ? TXD_FLAG_END : 0),
6982 would_hit_hwbug = 1;
6988 if (would_hit_hwbug) {
6989 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6991 /* If the workaround fails due to memory/mapping
6992 * failure, silently drop this packet.
6994 entry = tnapi->tx_prod;
6995 budget = tg3_tx_avail(tnapi);
6996 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6997 base_flags, mss, vlan))
7001 skb_tx_timestamp(skb);
7002 netdev_tx_sent_queue(txq, skb->len);
7004 /* Sync BD data before updating mailbox */
7007 /* Packets are ready, update Tx producer idx local and on card. */
7008 tw32_tx_mbox(tnapi->prodmbox, entry);
7010 tnapi->tx_prod = entry;
7011 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7012 netif_tx_stop_queue(txq);
7014 /* netif_tx_stop_queue() must be done before checking
7015 * checking tx index in tg3_tx_avail() below, because in
7016 * tg3_tx(), we update tx index before checking for
7017 * netif_tx_queue_stopped().
7020 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7021 netif_tx_wake_queue(txq);
7025 return NETDEV_TX_OK;
7028 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7029 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7034 return NETDEV_TX_OK;
7037 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7040 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7041 MAC_MODE_PORT_MODE_MASK);
7043 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7045 if (!tg3_flag(tp, 5705_PLUS))
7046 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7048 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7049 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7051 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7053 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7055 if (tg3_flag(tp, 5705_PLUS) ||
7056 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7057 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7058 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7061 tw32(MAC_MODE, tp->mac_mode);
7065 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7067 u32 val, bmcr, mac_mode, ptest = 0;
7069 tg3_phy_toggle_apd(tp, false);
7070 tg3_phy_toggle_automdix(tp, 0);
7072 if (extlpbk && tg3_phy_set_extloopbk(tp))
7075 bmcr = BMCR_FULLDPLX;
7080 bmcr |= BMCR_SPEED100;
7084 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7086 bmcr |= BMCR_SPEED100;
7089 bmcr |= BMCR_SPEED1000;
7094 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7095 tg3_readphy(tp, MII_CTRL1000, &val);
7096 val |= CTL1000_AS_MASTER |
7097 CTL1000_ENABLE_MASTER;
7098 tg3_writephy(tp, MII_CTRL1000, val);
7100 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7101 MII_TG3_FET_PTEST_TRIM_2;
7102 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7105 bmcr |= BMCR_LOOPBACK;
7107 tg3_writephy(tp, MII_BMCR, bmcr);
7109 /* The write needs to be flushed for the FETs */
7110 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7111 tg3_readphy(tp, MII_BMCR, &bmcr);
7115 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7116 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7117 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7118 MII_TG3_FET_PTEST_FRC_TX_LINK |
7119 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7121 /* The write needs to be flushed for the AC131 */
7122 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7125 /* Reset to prevent losing 1st rx packet intermittently */
7126 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7127 tg3_flag(tp, 5780_CLASS)) {
7128 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7130 tw32_f(MAC_RX_MODE, tp->rx_mode);
7133 mac_mode = tp->mac_mode &
7134 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7135 if (speed == SPEED_1000)
7136 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7138 mac_mode |= MAC_MODE_PORT_MODE_MII;
7140 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7141 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7143 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7144 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7145 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7146 mac_mode |= MAC_MODE_LINK_POLARITY;
7148 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7149 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7152 tw32(MAC_MODE, mac_mode);
7158 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7160 struct tg3 *tp = netdev_priv(dev);
7162 if (features & NETIF_F_LOOPBACK) {
7163 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7166 spin_lock_bh(&tp->lock);
7167 tg3_mac_loopback(tp, true);
7168 netif_carrier_on(tp->dev);
7169 spin_unlock_bh(&tp->lock);
7170 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7172 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7175 spin_lock_bh(&tp->lock);
7176 tg3_mac_loopback(tp, false);
7177 /* Force link status check */
7178 tg3_setup_phy(tp, 1);
7179 spin_unlock_bh(&tp->lock);
7180 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7184 static netdev_features_t tg3_fix_features(struct net_device *dev,
7185 netdev_features_t features)
7187 struct tg3 *tp = netdev_priv(dev);
7189 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7190 features &= ~NETIF_F_ALL_TSO;
7195 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7197 netdev_features_t changed = dev->features ^ features;
7199 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7200 tg3_set_loopback(dev, features);
7205 static void tg3_rx_prodring_free(struct tg3 *tp,
7206 struct tg3_rx_prodring_set *tpr)
7210 if (tpr != &tp->napi[0].prodring) {
7211 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7212 i = (i + 1) & tp->rx_std_ring_mask)
7213 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7216 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7217 for (i = tpr->rx_jmb_cons_idx;
7218 i != tpr->rx_jmb_prod_idx;
7219 i = (i + 1) & tp->rx_jmb_ring_mask) {
7220 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7228 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7229 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7232 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7233 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7234 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7239 /* Initialize rx rings for packet processing.
7241 * The chip has been shut down and the driver detached from
7242 * the networking, so no interrupts or new tx packets will
7243 * end up in the driver. tp->{tx,}lock are held and thus
7246 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7247 struct tg3_rx_prodring_set *tpr)
7249 u32 i, rx_pkt_dma_sz;
7251 tpr->rx_std_cons_idx = 0;
7252 tpr->rx_std_prod_idx = 0;
7253 tpr->rx_jmb_cons_idx = 0;
7254 tpr->rx_jmb_prod_idx = 0;
7256 if (tpr != &tp->napi[0].prodring) {
7257 memset(&tpr->rx_std_buffers[0], 0,
7258 TG3_RX_STD_BUFF_RING_SIZE(tp));
7259 if (tpr->rx_jmb_buffers)
7260 memset(&tpr->rx_jmb_buffers[0], 0,
7261 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7265 /* Zero out all descriptors. */
7266 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7268 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7269 if (tg3_flag(tp, 5780_CLASS) &&
7270 tp->dev->mtu > ETH_DATA_LEN)
7271 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7272 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7274 /* Initialize invariants of the rings, we only set this
7275 * stuff once. This works because the card does not
7276 * write into the rx buffer posting rings.
7278 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7279 struct tg3_rx_buffer_desc *rxd;
7281 rxd = &tpr->rx_std[i];
7282 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7283 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7284 rxd->opaque = (RXD_OPAQUE_RING_STD |
7285 (i << RXD_OPAQUE_INDEX_SHIFT));
7288 /* Now allocate fresh SKBs for each rx ring. */
7289 for (i = 0; i < tp->rx_pending; i++) {
7290 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7291 netdev_warn(tp->dev,
7292 "Using a smaller RX standard ring. Only "
7293 "%d out of %d buffers were allocated "
7294 "successfully\n", i, tp->rx_pending);
7302 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7305 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7307 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7310 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7311 struct tg3_rx_buffer_desc *rxd;
7313 rxd = &tpr->rx_jmb[i].std;
7314 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7315 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7317 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7318 (i << RXD_OPAQUE_INDEX_SHIFT));
7321 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7322 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7323 netdev_warn(tp->dev,
7324 "Using a smaller RX jumbo ring. Only %d "
7325 "out of %d buffers were allocated "
7326 "successfully\n", i, tp->rx_jumbo_pending);
7329 tp->rx_jumbo_pending = i;
7338 tg3_rx_prodring_free(tp, tpr);
7342 static void tg3_rx_prodring_fini(struct tg3 *tp,
7343 struct tg3_rx_prodring_set *tpr)
7345 kfree(tpr->rx_std_buffers);
7346 tpr->rx_std_buffers = NULL;
7347 kfree(tpr->rx_jmb_buffers);
7348 tpr->rx_jmb_buffers = NULL;
7350 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7351 tpr->rx_std, tpr->rx_std_mapping);
7355 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7356 tpr->rx_jmb, tpr->rx_jmb_mapping);
7361 static int tg3_rx_prodring_init(struct tg3 *tp,
7362 struct tg3_rx_prodring_set *tpr)
7364 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7366 if (!tpr->rx_std_buffers)
7369 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7370 TG3_RX_STD_RING_BYTES(tp),
7371 &tpr->rx_std_mapping,
7376 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7377 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7379 if (!tpr->rx_jmb_buffers)
7382 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7383 TG3_RX_JMB_RING_BYTES(tp),
7384 &tpr->rx_jmb_mapping,
7393 tg3_rx_prodring_fini(tp, tpr);
7397 /* Free up pending packets in all rx/tx rings.
7399 * The chip has been shut down and the driver detached from
7400 * the networking, so no interrupts or new tx packets will
7401 * end up in the driver. tp->{tx,}lock is not held and we are not
7402 * in an interrupt context and thus may sleep.
7404 static void tg3_free_rings(struct tg3 *tp)
7408 for (j = 0; j < tp->irq_cnt; j++) {
7409 struct tg3_napi *tnapi = &tp->napi[j];
7411 tg3_rx_prodring_free(tp, &tnapi->prodring);
7413 if (!tnapi->tx_buffers)
7416 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7417 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7422 tg3_tx_skb_unmap(tnapi, i,
7423 skb_shinfo(skb)->nr_frags - 1);
7425 dev_kfree_skb_any(skb);
7427 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7431 /* Initialize tx/rx rings for packet processing.
7433 * The chip has been shut down and the driver detached from
7434 * the networking, so no interrupts or new tx packets will
7435 * end up in the driver. tp->{tx,}lock are held and thus
7438 static int tg3_init_rings(struct tg3 *tp)
7442 /* Free up all the SKBs. */
7445 for (i = 0; i < tp->irq_cnt; i++) {
7446 struct tg3_napi *tnapi = &tp->napi[i];
7448 tnapi->last_tag = 0;
7449 tnapi->last_irq_tag = 0;
7450 tnapi->hw_status->status = 0;
7451 tnapi->hw_status->status_tag = 0;
7452 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7457 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7459 tnapi->rx_rcb_ptr = 0;
7461 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7463 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7473 * Must not be invoked with interrupt sources disabled and
7474 * the hardware shutdown down.
7476 static void tg3_free_consistent(struct tg3 *tp)
7480 for (i = 0; i < tp->irq_cnt; i++) {
7481 struct tg3_napi *tnapi = &tp->napi[i];
7483 if (tnapi->tx_ring) {
7484 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7485 tnapi->tx_ring, tnapi->tx_desc_mapping);
7486 tnapi->tx_ring = NULL;
7489 kfree(tnapi->tx_buffers);
7490 tnapi->tx_buffers = NULL;
7492 if (tnapi->rx_rcb) {
7493 dma_free_coherent(&tp->pdev->dev,
7494 TG3_RX_RCB_RING_BYTES(tp),
7496 tnapi->rx_rcb_mapping);
7497 tnapi->rx_rcb = NULL;
7500 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7502 if (tnapi->hw_status) {
7503 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7505 tnapi->status_mapping);
7506 tnapi->hw_status = NULL;
7511 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7512 tp->hw_stats, tp->stats_mapping);
7513 tp->hw_stats = NULL;
7518 * Must not be invoked with interrupt sources disabled and
7519 * the hardware shutdown down. Can sleep.
7521 static int tg3_alloc_consistent(struct tg3 *tp)
7525 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7526 sizeof(struct tg3_hw_stats),
7532 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7534 for (i = 0; i < tp->irq_cnt; i++) {
7535 struct tg3_napi *tnapi = &tp->napi[i];
7536 struct tg3_hw_status *sblk;
7538 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7540 &tnapi->status_mapping,
7542 if (!tnapi->hw_status)
7545 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7546 sblk = tnapi->hw_status;
7548 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7551 /* If multivector TSS is enabled, vector 0 does not handle
7552 * tx interrupts. Don't allocate any resources for it.
7554 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7555 (i && tg3_flag(tp, ENABLE_TSS))) {
7556 tnapi->tx_buffers = kzalloc(
7557 sizeof(struct tg3_tx_ring_info) *
7558 TG3_TX_RING_SIZE, GFP_KERNEL);
7559 if (!tnapi->tx_buffers)
7562 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7564 &tnapi->tx_desc_mapping,
7566 if (!tnapi->tx_ring)
7571 * When RSS is enabled, the status block format changes
7572 * slightly. The "rx_jumbo_consumer", "reserved",
7573 * and "rx_mini_consumer" members get mapped to the
7574 * other three rx return ring producer indexes.
7578 if (tg3_flag(tp, ENABLE_RSS)) {
7579 tnapi->rx_rcb_prod_idx = NULL;
7584 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7587 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7590 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7593 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7598 * If multivector RSS is enabled, vector 0 does not handle
7599 * rx or tx interrupts. Don't allocate any resources for it.
7601 if (!i && tg3_flag(tp, ENABLE_RSS))
7604 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7605 TG3_RX_RCB_RING_BYTES(tp),
7606 &tnapi->rx_rcb_mapping,
7611 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7617 tg3_free_consistent(tp);
7621 #define MAX_WAIT_CNT 1000
7623 /* To stop a block, clear the enable bit and poll till it
7624 * clears. tp->lock is held.
7626 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7631 if (tg3_flag(tp, 5705_PLUS)) {
7638 /* We can't enable/disable these bits of the
7639 * 5705/5750, just say success.
7652 for (i = 0; i < MAX_WAIT_CNT; i++) {
7655 if ((val & enable_bit) == 0)
7659 if (i == MAX_WAIT_CNT && !silent) {
7660 dev_err(&tp->pdev->dev,
7661 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7669 /* tp->lock is held. */
7670 static int tg3_abort_hw(struct tg3 *tp, int silent)
7674 tg3_disable_ints(tp);
7676 tp->rx_mode &= ~RX_MODE_ENABLE;
7677 tw32_f(MAC_RX_MODE, tp->rx_mode);
7680 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7681 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7682 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7683 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7684 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7685 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7687 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7688 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7689 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7690 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7691 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7692 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7693 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7695 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7696 tw32_f(MAC_MODE, tp->mac_mode);
7699 tp->tx_mode &= ~TX_MODE_ENABLE;
7700 tw32_f(MAC_TX_MODE, tp->tx_mode);
7702 for (i = 0; i < MAX_WAIT_CNT; i++) {
7704 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7707 if (i >= MAX_WAIT_CNT) {
7708 dev_err(&tp->pdev->dev,
7709 "%s timed out, TX_MODE_ENABLE will not clear "
7710 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7714 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7715 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7716 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7718 tw32(FTQ_RESET, 0xffffffff);
7719 tw32(FTQ_RESET, 0x00000000);
7721 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7722 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7724 for (i = 0; i < tp->irq_cnt; i++) {
7725 struct tg3_napi *tnapi = &tp->napi[i];
7726 if (tnapi->hw_status)
7727 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7733 /* Save PCI command register before chip reset */
7734 static void tg3_save_pci_state(struct tg3 *tp)
7736 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7739 /* Restore PCI state after chip reset */
7740 static void tg3_restore_pci_state(struct tg3 *tp)
7744 /* Re-enable indirect register accesses. */
7745 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7746 tp->misc_host_ctrl);
7748 /* Set MAX PCI retry to zero. */
7749 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7750 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7751 tg3_flag(tp, PCIX_MODE))
7752 val |= PCISTATE_RETRY_SAME_DMA;
7753 /* Allow reads and writes to the APE register and memory space. */
7754 if (tg3_flag(tp, ENABLE_APE))
7755 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7756 PCISTATE_ALLOW_APE_SHMEM_WR |
7757 PCISTATE_ALLOW_APE_PSPACE_WR;
7758 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7760 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7762 if (!tg3_flag(tp, PCI_EXPRESS)) {
7763 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7764 tp->pci_cacheline_sz);
7765 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7769 /* Make sure PCI-X relaxed ordering bit is clear. */
7770 if (tg3_flag(tp, PCIX_MODE)) {
7773 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7775 pcix_cmd &= ~PCI_X_CMD_ERO;
7776 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7780 if (tg3_flag(tp, 5780_CLASS)) {
7782 /* Chip reset on 5780 will reset MSI enable bit,
7783 * so need to restore it.
7785 if (tg3_flag(tp, USING_MSI)) {
7788 pci_read_config_word(tp->pdev,
7789 tp->msi_cap + PCI_MSI_FLAGS,
7791 pci_write_config_word(tp->pdev,
7792 tp->msi_cap + PCI_MSI_FLAGS,
7793 ctrl | PCI_MSI_FLAGS_ENABLE);
7794 val = tr32(MSGINT_MODE);
7795 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7800 /* tp->lock is held. */
7801 static int tg3_chip_reset(struct tg3 *tp)
7804 void (*write_op)(struct tg3 *, u32, u32);
7809 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7811 /* No matching tg3_nvram_unlock() after this because
7812 * chip reset below will undo the nvram lock.
7814 tp->nvram_lock_cnt = 0;
7816 /* GRC_MISC_CFG core clock reset will clear the memory
7817 * enable bit in PCI register 4 and the MSI enable bit
7818 * on some chips, so we save relevant registers here.
7820 tg3_save_pci_state(tp);
7822 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7823 tg3_flag(tp, 5755_PLUS))
7824 tw32(GRC_FASTBOOT_PC, 0);
7827 * We must avoid the readl() that normally takes place.
7828 * It locks machines, causes machine checks, and other
7829 * fun things. So, temporarily disable the 5701
7830 * hardware workaround, while we do the reset.
7832 write_op = tp->write32;
7833 if (write_op == tg3_write_flush_reg32)
7834 tp->write32 = tg3_write32;
7836 /* Prevent the irq handler from reading or writing PCI registers
7837 * during chip reset when the memory enable bit in the PCI command
7838 * register may be cleared. The chip does not generate interrupt
7839 * at this time, but the irq handler may still be called due to irq
7840 * sharing or irqpoll.
7842 tg3_flag_set(tp, CHIP_RESETTING);
7843 for (i = 0; i < tp->irq_cnt; i++) {
7844 struct tg3_napi *tnapi = &tp->napi[i];
7845 if (tnapi->hw_status) {
7846 tnapi->hw_status->status = 0;
7847 tnapi->hw_status->status_tag = 0;
7849 tnapi->last_tag = 0;
7850 tnapi->last_irq_tag = 0;
7854 for (i = 0; i < tp->irq_cnt; i++)
7855 synchronize_irq(tp->napi[i].irq_vec);
7857 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7858 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7859 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7863 val = GRC_MISC_CFG_CORECLK_RESET;
7865 if (tg3_flag(tp, PCI_EXPRESS)) {
7866 /* Force PCIe 1.0a mode */
7867 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7868 !tg3_flag(tp, 57765_PLUS) &&
7869 tr32(TG3_PCIE_PHY_TSTCTL) ==
7870 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7871 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7873 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7874 tw32(GRC_MISC_CFG, (1 << 29));
7879 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7880 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7881 tw32(GRC_VCPU_EXT_CTRL,
7882 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7885 /* Manage gphy power for all CPMU absent PCIe devices. */
7886 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7887 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7889 tw32(GRC_MISC_CFG, val);
7891 /* restore 5701 hardware bug workaround write method */
7892 tp->write32 = write_op;
7894 /* Unfortunately, we have to delay before the PCI read back.
7895 * Some 575X chips even will not respond to a PCI cfg access
7896 * when the reset command is given to the chip.
7898 * How do these hardware designers expect things to work
7899 * properly if the PCI write is posted for a long period
7900 * of time? It is always necessary to have some method by
7901 * which a register read back can occur to push the write
7902 * out which does the reset.
7904 * For most tg3 variants the trick below was working.
7909 /* Flush PCI posted writes. The normal MMIO registers
7910 * are inaccessible at this time so this is the only
7911 * way to make this reliably (actually, this is no longer
7912 * the case, see above). I tried to use indirect
7913 * register read/write but this upset some 5701 variants.
7915 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7919 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7922 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7926 /* Wait for link training to complete. */
7927 for (i = 0; i < 5000; i++)
7930 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7931 pci_write_config_dword(tp->pdev, 0xc4,
7932 cfg_val | (1 << 15));
7935 /* Clear the "no snoop" and "relaxed ordering" bits. */
7936 pci_read_config_word(tp->pdev,
7937 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7939 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7940 PCI_EXP_DEVCTL_NOSNOOP_EN);
7942 * Older PCIe devices only support the 128 byte
7943 * MPS setting. Enforce the restriction.
7945 if (!tg3_flag(tp, CPMU_PRESENT))
7946 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7947 pci_write_config_word(tp->pdev,
7948 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7951 /* Clear error status */
7952 pci_write_config_word(tp->pdev,
7953 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7954 PCI_EXP_DEVSTA_CED |
7955 PCI_EXP_DEVSTA_NFED |
7956 PCI_EXP_DEVSTA_FED |
7957 PCI_EXP_DEVSTA_URD);
7960 tg3_restore_pci_state(tp);
7962 tg3_flag_clear(tp, CHIP_RESETTING);
7963 tg3_flag_clear(tp, ERROR_PROCESSED);
7966 if (tg3_flag(tp, 5780_CLASS))
7967 val = tr32(MEMARB_MODE);
7968 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7970 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7972 tw32(0x5000, 0x400);
7975 tw32(GRC_MODE, tp->grc_mode);
7977 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7980 tw32(0xc4, val | (1 << 15));
7983 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7984 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7985 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7986 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7987 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7988 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7991 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7992 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7994 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7995 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8000 tw32_f(MAC_MODE, val);
8003 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8005 err = tg3_poll_fw(tp);
8011 if (tg3_flag(tp, PCI_EXPRESS) &&
8012 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8013 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8014 !tg3_flag(tp, 57765_PLUS)) {
8017 tw32(0x7c00, val | (1 << 25));
8020 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8021 val = tr32(TG3_CPMU_CLCK_ORIDE);
8022 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8025 /* Reprobe ASF enable state. */
8026 tg3_flag_clear(tp, ENABLE_ASF);
8027 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8028 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8029 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8032 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8033 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8034 tg3_flag_set(tp, ENABLE_ASF);
8035 tp->last_event_jiffies = jiffies;
8036 if (tg3_flag(tp, 5750_PLUS))
8037 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8044 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8045 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8047 /* tp->lock is held. */
8048 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8054 tg3_write_sig_pre_reset(tp, kind);
8056 tg3_abort_hw(tp, silent);
8057 err = tg3_chip_reset(tp);
8059 __tg3_set_mac_addr(tp, 0);
8061 tg3_write_sig_legacy(tp, kind);
8062 tg3_write_sig_post_reset(tp, kind);
8065 /* Save the stats across chip resets... */
8066 tg3_get_nstats(tp, &tp->net_stats_prev);
8067 tg3_get_estats(tp, &tp->estats_prev);
8069 /* And make sure the next sample is new data */
8070 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8079 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8081 struct tg3 *tp = netdev_priv(dev);
8082 struct sockaddr *addr = p;
8083 int err = 0, skip_mac_1 = 0;
8085 if (!is_valid_ether_addr(addr->sa_data))
8086 return -EADDRNOTAVAIL;
8088 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8090 if (!netif_running(dev))
8093 if (tg3_flag(tp, ENABLE_ASF)) {
8094 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8096 addr0_high = tr32(MAC_ADDR_0_HIGH);
8097 addr0_low = tr32(MAC_ADDR_0_LOW);
8098 addr1_high = tr32(MAC_ADDR_1_HIGH);
8099 addr1_low = tr32(MAC_ADDR_1_LOW);
8101 /* Skip MAC addr 1 if ASF is using it. */
8102 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8103 !(addr1_high == 0 && addr1_low == 0))
8106 spin_lock_bh(&tp->lock);
8107 __tg3_set_mac_addr(tp, skip_mac_1);
8108 spin_unlock_bh(&tp->lock);
8113 /* tp->lock is held. */
8114 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8115 dma_addr_t mapping, u32 maxlen_flags,
8119 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8120 ((u64) mapping >> 32));
8122 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8123 ((u64) mapping & 0xffffffff));
8125 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8128 if (!tg3_flag(tp, 5705_PLUS))
8130 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8134 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8138 if (!tg3_flag(tp, ENABLE_TSS)) {
8139 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8140 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8141 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8143 tw32(HOSTCC_TXCOL_TICKS, 0);
8144 tw32(HOSTCC_TXMAX_FRAMES, 0);
8145 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8148 if (!tg3_flag(tp, ENABLE_RSS)) {
8149 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8150 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8151 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8153 tw32(HOSTCC_RXCOL_TICKS, 0);
8154 tw32(HOSTCC_RXMAX_FRAMES, 0);
8155 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8158 if (!tg3_flag(tp, 5705_PLUS)) {
8159 u32 val = ec->stats_block_coalesce_usecs;
8161 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8162 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8164 if (!netif_carrier_ok(tp->dev))
8167 tw32(HOSTCC_STAT_COAL_TICKS, val);
8170 for (i = 0; i < tp->irq_cnt - 1; i++) {
8173 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8174 tw32(reg, ec->rx_coalesce_usecs);
8175 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8176 tw32(reg, ec->rx_max_coalesced_frames);
8177 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8178 tw32(reg, ec->rx_max_coalesced_frames_irq);
8180 if (tg3_flag(tp, ENABLE_TSS)) {
8181 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8182 tw32(reg, ec->tx_coalesce_usecs);
8183 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8184 tw32(reg, ec->tx_max_coalesced_frames);
8185 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8186 tw32(reg, ec->tx_max_coalesced_frames_irq);
8190 for (; i < tp->irq_max - 1; i++) {
8191 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8192 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8193 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8195 if (tg3_flag(tp, ENABLE_TSS)) {
8196 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8197 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8198 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8203 /* tp->lock is held. */
8204 static void tg3_rings_reset(struct tg3 *tp)
8207 u32 stblk, txrcb, rxrcb, limit;
8208 struct tg3_napi *tnapi = &tp->napi[0];
8210 /* Disable all transmit rings but the first. */
8211 if (!tg3_flag(tp, 5705_PLUS))
8212 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8213 else if (tg3_flag(tp, 5717_PLUS))
8214 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8215 else if (tg3_flag(tp, 57765_CLASS))
8216 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8218 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8220 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8221 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8222 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8223 BDINFO_FLAGS_DISABLED);
8226 /* Disable all receive return rings but the first. */
8227 if (tg3_flag(tp, 5717_PLUS))
8228 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8229 else if (!tg3_flag(tp, 5705_PLUS))
8230 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8231 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8232 tg3_flag(tp, 57765_CLASS))
8233 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8235 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8237 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8238 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8239 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8240 BDINFO_FLAGS_DISABLED);
8242 /* Disable interrupts */
8243 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8244 tp->napi[0].chk_msi_cnt = 0;
8245 tp->napi[0].last_rx_cons = 0;
8246 tp->napi[0].last_tx_cons = 0;
8248 /* Zero mailbox registers. */
8249 if (tg3_flag(tp, SUPPORT_MSIX)) {
8250 for (i = 1; i < tp->irq_max; i++) {
8251 tp->napi[i].tx_prod = 0;
8252 tp->napi[i].tx_cons = 0;
8253 if (tg3_flag(tp, ENABLE_TSS))
8254 tw32_mailbox(tp->napi[i].prodmbox, 0);
8255 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8256 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8257 tp->napi[i].chk_msi_cnt = 0;
8258 tp->napi[i].last_rx_cons = 0;
8259 tp->napi[i].last_tx_cons = 0;
8261 if (!tg3_flag(tp, ENABLE_TSS))
8262 tw32_mailbox(tp->napi[0].prodmbox, 0);
8264 tp->napi[0].tx_prod = 0;
8265 tp->napi[0].tx_cons = 0;
8266 tw32_mailbox(tp->napi[0].prodmbox, 0);
8267 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8270 /* Make sure the NIC-based send BD rings are disabled. */
8271 if (!tg3_flag(tp, 5705_PLUS)) {
8272 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8273 for (i = 0; i < 16; i++)
8274 tw32_tx_mbox(mbox + i * 8, 0);
8277 txrcb = NIC_SRAM_SEND_RCB;
8278 rxrcb = NIC_SRAM_RCV_RET_RCB;
8280 /* Clear status block in ram. */
8281 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8283 /* Set status block DMA address */
8284 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8285 ((u64) tnapi->status_mapping >> 32));
8286 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8287 ((u64) tnapi->status_mapping & 0xffffffff));
8289 if (tnapi->tx_ring) {
8290 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8291 (TG3_TX_RING_SIZE <<
8292 BDINFO_FLAGS_MAXLEN_SHIFT),
8293 NIC_SRAM_TX_BUFFER_DESC);
8294 txrcb += TG3_BDINFO_SIZE;
8297 if (tnapi->rx_rcb) {
8298 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8299 (tp->rx_ret_ring_mask + 1) <<
8300 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8301 rxrcb += TG3_BDINFO_SIZE;
8304 stblk = HOSTCC_STATBLCK_RING1;
8306 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8307 u64 mapping = (u64)tnapi->status_mapping;
8308 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8309 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8311 /* Clear status block in ram. */
8312 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8314 if (tnapi->tx_ring) {
8315 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8316 (TG3_TX_RING_SIZE <<
8317 BDINFO_FLAGS_MAXLEN_SHIFT),
8318 NIC_SRAM_TX_BUFFER_DESC);
8319 txrcb += TG3_BDINFO_SIZE;
8322 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8323 ((tp->rx_ret_ring_mask + 1) <<
8324 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8327 rxrcb += TG3_BDINFO_SIZE;
8331 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8333 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8335 if (!tg3_flag(tp, 5750_PLUS) ||
8336 tg3_flag(tp, 5780_CLASS) ||
8337 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8338 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8339 tg3_flag(tp, 57765_PLUS))
8340 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8341 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8342 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8343 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8345 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8347 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8348 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8350 val = min(nic_rep_thresh, host_rep_thresh);
8351 tw32(RCVBDI_STD_THRESH, val);
8353 if (tg3_flag(tp, 57765_PLUS))
8354 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8356 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8359 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8361 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8363 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8364 tw32(RCVBDI_JUMBO_THRESH, val);
8366 if (tg3_flag(tp, 57765_PLUS))
8367 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8370 static inline u32 calc_crc(unsigned char *buf, int len)
8378 for (j = 0; j < len; j++) {
8381 for (k = 0; k < 8; k++) {
8394 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8396 /* accept or reject all multicast frames */
8397 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8398 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8399 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8400 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8403 static void __tg3_set_rx_mode(struct net_device *dev)
8405 struct tg3 *tp = netdev_priv(dev);
8408 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8409 RX_MODE_KEEP_VLAN_TAG);
8411 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8412 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8415 if (!tg3_flag(tp, ENABLE_ASF))
8416 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8419 if (dev->flags & IFF_PROMISC) {
8420 /* Promiscuous mode. */
8421 rx_mode |= RX_MODE_PROMISC;
8422 } else if (dev->flags & IFF_ALLMULTI) {
8423 /* Accept all multicast. */
8424 tg3_set_multi(tp, 1);
8425 } else if (netdev_mc_empty(dev)) {
8426 /* Reject all multicast. */
8427 tg3_set_multi(tp, 0);
8429 /* Accept one or more multicast(s). */
8430 struct netdev_hw_addr *ha;
8431 u32 mc_filter[4] = { 0, };
8436 netdev_for_each_mc_addr(ha, dev) {
8437 crc = calc_crc(ha->addr, ETH_ALEN);
8439 regidx = (bit & 0x60) >> 5;
8441 mc_filter[regidx] |= (1 << bit);
8444 tw32(MAC_HASH_REG_0, mc_filter[0]);
8445 tw32(MAC_HASH_REG_1, mc_filter[1]);
8446 tw32(MAC_HASH_REG_2, mc_filter[2]);
8447 tw32(MAC_HASH_REG_3, mc_filter[3]);
8450 if (rx_mode != tp->rx_mode) {
8451 tp->rx_mode = rx_mode;
8452 tw32_f(MAC_RX_MODE, rx_mode);
8457 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8461 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8462 tp->rss_ind_tbl[i] =
8463 ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8466 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8470 if (!tg3_flag(tp, SUPPORT_MSIX))
8473 if (tp->irq_cnt <= 2) {
8474 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8478 /* Validate table against current IRQ count */
8479 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8480 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8484 if (i != TG3_RSS_INDIR_TBL_SIZE)
8485 tg3_rss_init_dflt_indir_tbl(tp);
8488 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8491 u32 reg = MAC_RSS_INDIR_TBL_0;
8493 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8494 u32 val = tp->rss_ind_tbl[i];
8496 for (; i % 8; i++) {
8498 val |= tp->rss_ind_tbl[i];
8505 /* tp->lock is held. */
8506 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8508 u32 val, rdmac_mode;
8510 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8512 tg3_disable_ints(tp);
8516 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8518 if (tg3_flag(tp, INIT_COMPLETE))
8519 tg3_abort_hw(tp, 1);
8521 /* Enable MAC control of LPI */
8522 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8523 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8524 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8525 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8527 tw32_f(TG3_CPMU_EEE_CTRL,
8528 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8530 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8531 TG3_CPMU_EEEMD_LPI_IN_TX |
8532 TG3_CPMU_EEEMD_LPI_IN_RX |
8533 TG3_CPMU_EEEMD_EEE_ENABLE;
8535 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8536 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8538 if (tg3_flag(tp, ENABLE_APE))
8539 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8541 tw32_f(TG3_CPMU_EEE_MODE, val);
8543 tw32_f(TG3_CPMU_EEE_DBTMR1,
8544 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8545 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8547 tw32_f(TG3_CPMU_EEE_DBTMR2,
8548 TG3_CPMU_DBTMR2_APE_TX_2047US |
8549 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8555 err = tg3_chip_reset(tp);
8559 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8561 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8562 val = tr32(TG3_CPMU_CTRL);
8563 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8564 tw32(TG3_CPMU_CTRL, val);
8566 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8567 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8568 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8569 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8571 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8572 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8573 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8574 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8576 val = tr32(TG3_CPMU_HST_ACC);
8577 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8578 val |= CPMU_HST_ACC_MACCLK_6_25;
8579 tw32(TG3_CPMU_HST_ACC, val);
8582 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8583 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8584 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8585 PCIE_PWR_MGMT_L1_THRESH_4MS;
8586 tw32(PCIE_PWR_MGMT_THRESH, val);
8588 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8589 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8591 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8593 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8594 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8597 if (tg3_flag(tp, L1PLLPD_EN)) {
8598 u32 grc_mode = tr32(GRC_MODE);
8600 /* Access the lower 1K of PL PCIE block registers. */
8601 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8602 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8604 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8605 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8606 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8608 tw32(GRC_MODE, grc_mode);
8611 if (tg3_flag(tp, 57765_CLASS)) {
8612 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8613 u32 grc_mode = tr32(GRC_MODE);
8615 /* Access the lower 1K of PL PCIE block registers. */
8616 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8617 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8619 val = tr32(TG3_PCIE_TLDLPL_PORT +
8620 TG3_PCIE_PL_LO_PHYCTL5);
8621 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8622 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8624 tw32(GRC_MODE, grc_mode);
8627 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8628 u32 grc_mode = tr32(GRC_MODE);
8630 /* Access the lower 1K of DL PCIE block registers. */
8631 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8632 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8634 val = tr32(TG3_PCIE_TLDLPL_PORT +
8635 TG3_PCIE_DL_LO_FTSMAX);
8636 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8637 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8638 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8640 tw32(GRC_MODE, grc_mode);
8643 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8644 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8645 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8646 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8649 /* This works around an issue with Athlon chipsets on
8650 * B3 tigon3 silicon. This bit has no effect on any
8651 * other revision. But do not set this on PCI Express
8652 * chips and don't even touch the clocks if the CPMU is present.
8654 if (!tg3_flag(tp, CPMU_PRESENT)) {
8655 if (!tg3_flag(tp, PCI_EXPRESS))
8656 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8657 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8660 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8661 tg3_flag(tp, PCIX_MODE)) {
8662 val = tr32(TG3PCI_PCISTATE);
8663 val |= PCISTATE_RETRY_SAME_DMA;
8664 tw32(TG3PCI_PCISTATE, val);
8667 if (tg3_flag(tp, ENABLE_APE)) {
8668 /* Allow reads and writes to the
8669 * APE register and memory space.
8671 val = tr32(TG3PCI_PCISTATE);
8672 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8673 PCISTATE_ALLOW_APE_SHMEM_WR |
8674 PCISTATE_ALLOW_APE_PSPACE_WR;
8675 tw32(TG3PCI_PCISTATE, val);
8678 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8679 /* Enable some hw fixes. */
8680 val = tr32(TG3PCI_MSI_DATA);
8681 val |= (1 << 26) | (1 << 28) | (1 << 29);
8682 tw32(TG3PCI_MSI_DATA, val);
8685 /* Descriptor ring init may make accesses to the
8686 * NIC SRAM area to setup the TX descriptors, so we
8687 * can only do this after the hardware has been
8688 * successfully reset.
8690 err = tg3_init_rings(tp);
8694 if (tg3_flag(tp, 57765_PLUS)) {
8695 val = tr32(TG3PCI_DMA_RW_CTRL) &
8696 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8697 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8698 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8699 if (!tg3_flag(tp, 57765_CLASS) &&
8700 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8701 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8702 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8703 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8704 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8705 /* This value is determined during the probe time DMA
8706 * engine test, tg3_test_dma.
8708 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8711 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8712 GRC_MODE_4X_NIC_SEND_RINGS |
8713 GRC_MODE_NO_TX_PHDR_CSUM |
8714 GRC_MODE_NO_RX_PHDR_CSUM);
8715 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8717 /* Pseudo-header checksum is done by hardware logic and not
8718 * the offload processers, so make the chip do the pseudo-
8719 * header checksums on receive. For transmit it is more
8720 * convenient to do the pseudo-header checksum in software
8721 * as Linux does that on transmit for us in all cases.
8723 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8727 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8729 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8730 val = tr32(GRC_MISC_CFG);
8732 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8733 tw32(GRC_MISC_CFG, val);
8735 /* Initialize MBUF/DESC pool. */
8736 if (tg3_flag(tp, 5750_PLUS)) {
8738 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8739 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8740 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8741 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8743 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8744 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8745 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8746 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8749 fw_len = tp->fw_len;
8750 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8751 tw32(BUFMGR_MB_POOL_ADDR,
8752 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8753 tw32(BUFMGR_MB_POOL_SIZE,
8754 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8757 if (tp->dev->mtu <= ETH_DATA_LEN) {
8758 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8759 tp->bufmgr_config.mbuf_read_dma_low_water);
8760 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8761 tp->bufmgr_config.mbuf_mac_rx_low_water);
8762 tw32(BUFMGR_MB_HIGH_WATER,
8763 tp->bufmgr_config.mbuf_high_water);
8765 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8766 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8767 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8768 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8769 tw32(BUFMGR_MB_HIGH_WATER,
8770 tp->bufmgr_config.mbuf_high_water_jumbo);
8772 tw32(BUFMGR_DMA_LOW_WATER,
8773 tp->bufmgr_config.dma_low_water);
8774 tw32(BUFMGR_DMA_HIGH_WATER,
8775 tp->bufmgr_config.dma_high_water);
8777 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8778 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8779 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8780 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8781 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8782 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8783 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8784 tw32(BUFMGR_MODE, val);
8785 for (i = 0; i < 2000; i++) {
8786 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8791 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8795 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8796 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8798 tg3_setup_rxbd_thresholds(tp);
8800 /* Initialize TG3_BDINFO's at:
8801 * RCVDBDI_STD_BD: standard eth size rx ring
8802 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8803 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8806 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8807 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8808 * ring attribute flags
8809 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8811 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8812 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8814 * The size of each ring is fixed in the firmware, but the location is
8817 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8818 ((u64) tpr->rx_std_mapping >> 32));
8819 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8820 ((u64) tpr->rx_std_mapping & 0xffffffff));
8821 if (!tg3_flag(tp, 5717_PLUS))
8822 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8823 NIC_SRAM_RX_BUFFER_DESC);
8825 /* Disable the mini ring */
8826 if (!tg3_flag(tp, 5705_PLUS))
8827 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8828 BDINFO_FLAGS_DISABLED);
8830 /* Program the jumbo buffer descriptor ring control
8831 * blocks on those devices that have them.
8833 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8834 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8836 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8837 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8838 ((u64) tpr->rx_jmb_mapping >> 32));
8839 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8840 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8841 val = TG3_RX_JMB_RING_SIZE(tp) <<
8842 BDINFO_FLAGS_MAXLEN_SHIFT;
8843 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8844 val | BDINFO_FLAGS_USE_EXT_RECV);
8845 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8846 tg3_flag(tp, 57765_CLASS))
8847 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8848 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8850 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8851 BDINFO_FLAGS_DISABLED);
8854 if (tg3_flag(tp, 57765_PLUS)) {
8855 val = TG3_RX_STD_RING_SIZE(tp);
8856 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8857 val |= (TG3_RX_STD_DMA_SZ << 2);
8859 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8861 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8863 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8865 tpr->rx_std_prod_idx = tp->rx_pending;
8866 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8868 tpr->rx_jmb_prod_idx =
8869 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8870 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8872 tg3_rings_reset(tp);
8874 /* Initialize MAC address and backoff seed. */
8875 __tg3_set_mac_addr(tp, 0);
8877 /* MTU + ethernet header + FCS + optional VLAN tag */
8878 tw32(MAC_RX_MTU_SIZE,
8879 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8881 /* The slot time is changed by tg3_setup_phy if we
8882 * run at gigabit with half duplex.
8884 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8885 (6 << TX_LENGTHS_IPG_SHIFT) |
8886 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8888 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8889 val |= tr32(MAC_TX_LENGTHS) &
8890 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8891 TX_LENGTHS_CNT_DWN_VAL_MSK);
8893 tw32(MAC_TX_LENGTHS, val);
8895 /* Receive rules. */
8896 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8897 tw32(RCVLPC_CONFIG, 0x0181);
8899 /* Calculate RDMAC_MODE setting early, we need it to determine
8900 * the RCVLPC_STATE_ENABLE mask.
8902 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8903 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8904 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8905 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8906 RDMAC_MODE_LNGREAD_ENAB);
8908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8909 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8911 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8912 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8913 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8914 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8915 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8916 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8918 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8919 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8920 if (tg3_flag(tp, TSO_CAPABLE) &&
8921 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8922 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8923 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8924 !tg3_flag(tp, IS_5788)) {
8925 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8929 if (tg3_flag(tp, PCI_EXPRESS))
8930 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8932 if (tg3_flag(tp, HW_TSO_1) ||
8933 tg3_flag(tp, HW_TSO_2) ||
8934 tg3_flag(tp, HW_TSO_3))
8935 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8937 if (tg3_flag(tp, 57765_PLUS) ||
8938 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8939 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8940 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8942 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8943 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8945 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8946 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8947 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8948 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8949 tg3_flag(tp, 57765_PLUS)) {
8950 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8951 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8952 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8953 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8954 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8955 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8956 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8957 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8958 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8960 tw32(TG3_RDMA_RSRVCTRL_REG,
8961 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8964 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8965 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8966 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8967 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8968 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8969 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8972 /* Receive/send statistics. */
8973 if (tg3_flag(tp, 5750_PLUS)) {
8974 val = tr32(RCVLPC_STATS_ENABLE);
8975 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8976 tw32(RCVLPC_STATS_ENABLE, val);
8977 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8978 tg3_flag(tp, TSO_CAPABLE)) {
8979 val = tr32(RCVLPC_STATS_ENABLE);
8980 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8981 tw32(RCVLPC_STATS_ENABLE, val);
8983 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8985 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8986 tw32(SNDDATAI_STATSENAB, 0xffffff);
8987 tw32(SNDDATAI_STATSCTRL,
8988 (SNDDATAI_SCTRL_ENABLE |
8989 SNDDATAI_SCTRL_FASTUPD));
8991 /* Setup host coalescing engine. */
8992 tw32(HOSTCC_MODE, 0);
8993 for (i = 0; i < 2000; i++) {
8994 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8999 __tg3_set_coalesce(tp, &tp->coal);
9001 if (!tg3_flag(tp, 5705_PLUS)) {
9002 /* Status/statistics block address. See tg3_timer,
9003 * the tg3_periodic_fetch_stats call there, and
9004 * tg3_get_stats to see how this works for 5705/5750 chips.
9006 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9007 ((u64) tp->stats_mapping >> 32));
9008 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9009 ((u64) tp->stats_mapping & 0xffffffff));
9010 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9012 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9014 /* Clear statistics and status block memory areas */
9015 for (i = NIC_SRAM_STATS_BLK;
9016 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9018 tg3_write_mem(tp, i, 0);
9023 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9025 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9026 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9027 if (!tg3_flag(tp, 5705_PLUS))
9028 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9030 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9031 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9032 /* reset to prevent losing 1st rx packet intermittently */
9033 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9037 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9038 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9039 MAC_MODE_FHDE_ENABLE;
9040 if (tg3_flag(tp, ENABLE_APE))
9041 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9042 if (!tg3_flag(tp, 5705_PLUS) &&
9043 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9044 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9045 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9046 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9049 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9050 * If TG3_FLAG_IS_NIC is zero, we should read the
9051 * register to preserve the GPIO settings for LOMs. The GPIOs,
9052 * whether used as inputs or outputs, are set by boot code after
9055 if (!tg3_flag(tp, IS_NIC)) {
9058 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9059 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9060 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9063 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9064 GRC_LCLCTRL_GPIO_OUTPUT3;
9066 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9067 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9069 tp->grc_local_ctrl &= ~gpio_mask;
9070 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9072 /* GPIO1 must be driven high for eeprom write protect */
9073 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9074 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9075 GRC_LCLCTRL_GPIO_OUTPUT1);
9077 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9080 if (tg3_flag(tp, USING_MSIX)) {
9081 val = tr32(MSGINT_MODE);
9082 val |= MSGINT_MODE_ENABLE;
9083 if (tp->irq_cnt > 1)
9084 val |= MSGINT_MODE_MULTIVEC_EN;
9085 if (!tg3_flag(tp, 1SHOT_MSI))
9086 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9087 tw32(MSGINT_MODE, val);
9090 if (!tg3_flag(tp, 5705_PLUS)) {
9091 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9095 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9096 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9097 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9098 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9099 WDMAC_MODE_LNGREAD_ENAB);
9101 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9102 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9103 if (tg3_flag(tp, TSO_CAPABLE) &&
9104 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9105 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9107 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9108 !tg3_flag(tp, IS_5788)) {
9109 val |= WDMAC_MODE_RX_ACCEL;
9113 /* Enable host coalescing bug fix */
9114 if (tg3_flag(tp, 5755_PLUS))
9115 val |= WDMAC_MODE_STATUS_TAG_FIX;
9117 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9118 val |= WDMAC_MODE_BURST_ALL_DATA;
9120 tw32_f(WDMAC_MODE, val);
9123 if (tg3_flag(tp, PCIX_MODE)) {
9126 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9128 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9129 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9130 pcix_cmd |= PCI_X_CMD_READ_2K;
9131 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9132 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9133 pcix_cmd |= PCI_X_CMD_READ_2K;
9135 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9139 tw32_f(RDMAC_MODE, rdmac_mode);
9142 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9143 if (!tg3_flag(tp, 5705_PLUS))
9144 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9146 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9148 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9150 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9152 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9153 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9154 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9155 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9156 val |= RCVDBDI_MODE_LRG_RING_SZ;
9157 tw32(RCVDBDI_MODE, val);
9158 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9159 if (tg3_flag(tp, HW_TSO_1) ||
9160 tg3_flag(tp, HW_TSO_2) ||
9161 tg3_flag(tp, HW_TSO_3))
9162 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9163 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9164 if (tg3_flag(tp, ENABLE_TSS))
9165 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9166 tw32(SNDBDI_MODE, val);
9167 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9169 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9170 err = tg3_load_5701_a0_firmware_fix(tp);
9175 if (tg3_flag(tp, TSO_CAPABLE)) {
9176 err = tg3_load_tso_firmware(tp);
9181 tp->tx_mode = TX_MODE_ENABLE;
9183 if (tg3_flag(tp, 5755_PLUS) ||
9184 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9185 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9187 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9188 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9189 tp->tx_mode &= ~val;
9190 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9193 tw32_f(MAC_TX_MODE, tp->tx_mode);
9196 if (tg3_flag(tp, ENABLE_RSS)) {
9197 tg3_rss_write_indir_tbl(tp);
9199 /* Setup the "secret" hash key. */
9200 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9201 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9202 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9203 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9204 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9205 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9206 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9207 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9208 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9209 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9212 tp->rx_mode = RX_MODE_ENABLE;
9213 if (tg3_flag(tp, 5755_PLUS))
9214 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9216 if (tg3_flag(tp, ENABLE_RSS))
9217 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9218 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9219 RX_MODE_RSS_IPV6_HASH_EN |
9220 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9221 RX_MODE_RSS_IPV4_HASH_EN |
9222 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9224 tw32_f(MAC_RX_MODE, tp->rx_mode);
9227 tw32(MAC_LED_CTRL, tp->led_ctrl);
9229 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9230 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9231 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9234 tw32_f(MAC_RX_MODE, tp->rx_mode);
9237 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9238 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9239 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9240 /* Set drive transmission level to 1.2V */
9241 /* only if the signal pre-emphasis bit is not set */
9242 val = tr32(MAC_SERDES_CFG);
9245 tw32(MAC_SERDES_CFG, val);
9247 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9248 tw32(MAC_SERDES_CFG, 0x616000);
9251 /* Prevent chip from dropping frames when flow control
9254 if (tg3_flag(tp, 57765_CLASS))
9258 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9261 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9262 /* Use hardware link auto-negotiation */
9263 tg3_flag_set(tp, HW_AUTONEG);
9266 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9267 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9270 tmp = tr32(SERDES_RX_CTRL);
9271 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9272 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9273 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9274 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9277 if (!tg3_flag(tp, USE_PHYLIB)) {
9278 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9279 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9281 err = tg3_setup_phy(tp, 0);
9285 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9286 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9289 /* Clear CRC stats. */
9290 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9291 tg3_writephy(tp, MII_TG3_TEST1,
9292 tmp | MII_TG3_TEST1_CRC_EN);
9293 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9298 __tg3_set_rx_mode(tp->dev);
9300 /* Initialize receive rules. */
9301 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9302 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9303 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9304 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9306 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9310 if (tg3_flag(tp, ENABLE_ASF))
9314 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9316 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9318 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9320 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9322 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9324 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9326 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9328 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9330 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9332 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9334 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9336 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9338 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9340 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9348 if (tg3_flag(tp, ENABLE_APE))
9349 /* Write our heartbeat update interval to APE. */
9350 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9351 APE_HOST_HEARTBEAT_INT_DISABLE);
9353 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9358 /* Called at device open time to get the chip ready for
9359 * packet processing. Invoked with tp->lock held.
9361 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9363 tg3_switch_clocks(tp);
9365 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9367 return tg3_reset_hw(tp, reset_phy);
9370 #define TG3_STAT_ADD32(PSTAT, REG) \
9371 do { u32 __val = tr32(REG); \
9372 (PSTAT)->low += __val; \
9373 if ((PSTAT)->low < __val) \
9374 (PSTAT)->high += 1; \
9377 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9379 struct tg3_hw_stats *sp = tp->hw_stats;
9381 if (!netif_carrier_ok(tp->dev))
9384 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9385 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9386 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9387 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9388 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9389 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9390 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9391 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9392 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9393 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9394 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9395 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9396 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9398 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9399 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9400 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9401 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9402 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9403 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9404 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9405 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9406 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9407 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9408 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9409 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9410 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9411 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9413 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9414 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9415 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9416 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9417 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9419 u32 val = tr32(HOSTCC_FLOW_ATTN);
9420 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9422 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9423 sp->rx_discards.low += val;
9424 if (sp->rx_discards.low < val)
9425 sp->rx_discards.high += 1;
9427 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9429 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9432 static void tg3_chk_missed_msi(struct tg3 *tp)
9436 for (i = 0; i < tp->irq_cnt; i++) {
9437 struct tg3_napi *tnapi = &tp->napi[i];
9439 if (tg3_has_work(tnapi)) {
9440 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9441 tnapi->last_tx_cons == tnapi->tx_cons) {
9442 if (tnapi->chk_msi_cnt < 1) {
9443 tnapi->chk_msi_cnt++;
9449 tnapi->chk_msi_cnt = 0;
9450 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9451 tnapi->last_tx_cons = tnapi->tx_cons;
9455 static void tg3_timer(unsigned long __opaque)
9457 struct tg3 *tp = (struct tg3 *) __opaque;
9459 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9462 spin_lock(&tp->lock);
9464 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9465 tg3_flag(tp, 57765_CLASS))
9466 tg3_chk_missed_msi(tp);
9468 if (!tg3_flag(tp, TAGGED_STATUS)) {
9469 /* All of this garbage is because when using non-tagged
9470 * IRQ status the mailbox/status_block protocol the chip
9471 * uses with the cpu is race prone.
9473 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9474 tw32(GRC_LOCAL_CTRL,
9475 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9477 tw32(HOSTCC_MODE, tp->coalesce_mode |
9478 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9481 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9482 spin_unlock(&tp->lock);
9483 tg3_reset_task_schedule(tp);
9488 /* This part only runs once per second. */
9489 if (!--tp->timer_counter) {
9490 if (tg3_flag(tp, 5705_PLUS))
9491 tg3_periodic_fetch_stats(tp);
9493 if (tp->setlpicnt && !--tp->setlpicnt)
9494 tg3_phy_eee_enable(tp);
9496 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9500 mac_stat = tr32(MAC_STATUS);
9503 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9504 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9506 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9510 tg3_setup_phy(tp, 0);
9511 } else if (tg3_flag(tp, POLL_SERDES)) {
9512 u32 mac_stat = tr32(MAC_STATUS);
9515 if (netif_carrier_ok(tp->dev) &&
9516 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9519 if (!netif_carrier_ok(tp->dev) &&
9520 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9521 MAC_STATUS_SIGNAL_DET))) {
9525 if (!tp->serdes_counter) {
9528 ~MAC_MODE_PORT_MODE_MASK));
9530 tw32_f(MAC_MODE, tp->mac_mode);
9533 tg3_setup_phy(tp, 0);
9535 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9536 tg3_flag(tp, 5780_CLASS)) {
9537 tg3_serdes_parallel_detect(tp);
9540 tp->timer_counter = tp->timer_multiplier;
9543 /* Heartbeat is only sent once every 2 seconds.
9545 * The heartbeat is to tell the ASF firmware that the host
9546 * driver is still alive. In the event that the OS crashes,
9547 * ASF needs to reset the hardware to free up the FIFO space
9548 * that may be filled with rx packets destined for the host.
9549 * If the FIFO is full, ASF will no longer function properly.
9551 * Unintended resets have been reported on real time kernels
9552 * where the timer doesn't run on time. Netpoll will also have
9555 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9556 * to check the ring condition when the heartbeat is expiring
9557 * before doing the reset. This will prevent most unintended
9560 if (!--tp->asf_counter) {
9561 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9562 tg3_wait_for_event_ack(tp);
9564 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9565 FWCMD_NICDRV_ALIVE3);
9566 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9567 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9568 TG3_FW_UPDATE_TIMEOUT_SEC);
9570 tg3_generate_fw_event(tp);
9572 tp->asf_counter = tp->asf_multiplier;
9575 spin_unlock(&tp->lock);
9578 tp->timer.expires = jiffies + tp->timer_offset;
9579 add_timer(&tp->timer);
9582 static void __devinit tg3_timer_init(struct tg3 *tp)
9584 if (tg3_flag(tp, TAGGED_STATUS) &&
9585 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9586 !tg3_flag(tp, 57765_CLASS))
9587 tp->timer_offset = HZ;
9589 tp->timer_offset = HZ / 10;
9591 BUG_ON(tp->timer_offset > HZ);
9593 tp->timer_multiplier = (HZ / tp->timer_offset);
9594 tp->asf_multiplier = (HZ / tp->timer_offset) *
9595 TG3_FW_UPDATE_FREQ_SEC;
9597 init_timer(&tp->timer);
9598 tp->timer.data = (unsigned long) tp;
9599 tp->timer.function = tg3_timer;
9602 static void tg3_timer_start(struct tg3 *tp)
9604 tp->asf_counter = tp->asf_multiplier;
9605 tp->timer_counter = tp->timer_multiplier;
9607 tp->timer.expires = jiffies + tp->timer_offset;
9608 add_timer(&tp->timer);
9611 static void tg3_timer_stop(struct tg3 *tp)
9613 del_timer_sync(&tp->timer);
9616 /* Restart hardware after configuration changes, self-test, etc.
9617 * Invoked with tp->lock held.
9619 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9620 __releases(tp->lock)
9621 __acquires(tp->lock)
9625 err = tg3_init_hw(tp, reset_phy);
9628 "Failed to re-initialize device, aborting\n");
9629 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9630 tg3_full_unlock(tp);
9633 tg3_napi_enable(tp);
9635 tg3_full_lock(tp, 0);
9640 static void tg3_reset_task(struct work_struct *work)
9642 struct tg3 *tp = container_of(work, struct tg3, reset_task);
9645 tg3_full_lock(tp, 0);
9647 if (!netif_running(tp->dev)) {
9648 tg3_flag_clear(tp, RESET_TASK_PENDING);
9649 tg3_full_unlock(tp);
9653 tg3_full_unlock(tp);
9659 tg3_full_lock(tp, 1);
9661 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9662 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9663 tp->write32_rx_mbox = tg3_write_flush_reg32;
9664 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9665 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9668 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9669 err = tg3_init_hw(tp, 1);
9673 tg3_netif_start(tp);
9676 tg3_full_unlock(tp);
9681 tg3_flag_clear(tp, RESET_TASK_PENDING);
9684 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9687 unsigned long flags;
9689 struct tg3_napi *tnapi = &tp->napi[irq_num];
9691 if (tp->irq_cnt == 1)
9692 name = tp->dev->name;
9694 name = &tnapi->irq_lbl[0];
9695 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9696 name[IFNAMSIZ-1] = 0;
9699 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9701 if (tg3_flag(tp, 1SHOT_MSI))
9706 if (tg3_flag(tp, TAGGED_STATUS))
9707 fn = tg3_interrupt_tagged;
9708 flags = IRQF_SHARED;
9711 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9714 static int tg3_test_interrupt(struct tg3 *tp)
9716 struct tg3_napi *tnapi = &tp->napi[0];
9717 struct net_device *dev = tp->dev;
9718 int err, i, intr_ok = 0;
9721 if (!netif_running(dev))
9724 tg3_disable_ints(tp);
9726 free_irq(tnapi->irq_vec, tnapi);
9729 * Turn off MSI one shot mode. Otherwise this test has no
9730 * observable way to know whether the interrupt was delivered.
9732 if (tg3_flag(tp, 57765_PLUS)) {
9733 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9734 tw32(MSGINT_MODE, val);
9737 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9738 IRQF_SHARED, dev->name, tnapi);
9742 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9743 tg3_enable_ints(tp);
9745 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9748 for (i = 0; i < 5; i++) {
9749 u32 int_mbox, misc_host_ctrl;
9751 int_mbox = tr32_mailbox(tnapi->int_mbox);
9752 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9754 if ((int_mbox != 0) ||
9755 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9760 if (tg3_flag(tp, 57765_PLUS) &&
9761 tnapi->hw_status->status_tag != tnapi->last_tag)
9762 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9767 tg3_disable_ints(tp);
9769 free_irq(tnapi->irq_vec, tnapi);
9771 err = tg3_request_irq(tp, 0);
9777 /* Reenable MSI one shot mode. */
9778 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9779 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9780 tw32(MSGINT_MODE, val);
9788 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9789 * successfully restored
9791 static int tg3_test_msi(struct tg3 *tp)
9796 if (!tg3_flag(tp, USING_MSI))
9799 /* Turn off SERR reporting in case MSI terminates with Master
9802 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9803 pci_write_config_word(tp->pdev, PCI_COMMAND,
9804 pci_cmd & ~PCI_COMMAND_SERR);
9806 err = tg3_test_interrupt(tp);
9808 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9813 /* other failures */
9817 /* MSI test failed, go back to INTx mode */
9818 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9819 "to INTx mode. Please report this failure to the PCI "
9820 "maintainer and include system chipset information\n");
9822 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9824 pci_disable_msi(tp->pdev);
9826 tg3_flag_clear(tp, USING_MSI);
9827 tp->napi[0].irq_vec = tp->pdev->irq;
9829 err = tg3_request_irq(tp, 0);
9833 /* Need to reset the chip because the MSI cycle may have terminated
9834 * with Master Abort.
9836 tg3_full_lock(tp, 1);
9838 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9839 err = tg3_init_hw(tp, 1);
9841 tg3_full_unlock(tp);
9844 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9849 static int tg3_request_firmware(struct tg3 *tp)
9851 const __be32 *fw_data;
9853 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9854 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9859 fw_data = (void *)tp->fw->data;
9861 /* Firmware blob starts with version numbers, followed by
9862 * start address and _full_ length including BSS sections
9863 * (which must be longer than the actual data, of course
9866 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9867 if (tp->fw_len < (tp->fw->size - 12)) {
9868 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9869 tp->fw_len, tp->fw_needed);
9870 release_firmware(tp->fw);
9875 /* We no longer need firmware; we have it. */
9876 tp->fw_needed = NULL;
9880 static bool tg3_enable_msix(struct tg3 *tp)
9883 struct msix_entry msix_ent[tp->irq_max];
9885 tp->irq_cnt = num_online_cpus();
9886 if (tp->irq_cnt > 1) {
9887 /* We want as many rx rings enabled as there are cpus.
9888 * In multiqueue MSI-X mode, the first MSI-X vector
9889 * only deals with link interrupts, etc, so we add
9890 * one to the number of vectors we are requesting.
9892 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9895 for (i = 0; i < tp->irq_max; i++) {
9896 msix_ent[i].entry = i;
9897 msix_ent[i].vector = 0;
9900 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9903 } else if (rc != 0) {
9904 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9906 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9911 for (i = 0; i < tp->irq_max; i++)
9912 tp->napi[i].irq_vec = msix_ent[i].vector;
9914 netif_set_real_num_tx_queues(tp->dev, 1);
9915 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9916 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9917 pci_disable_msix(tp->pdev);
9921 if (tp->irq_cnt > 1) {
9922 tg3_flag_set(tp, ENABLE_RSS);
9924 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9925 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9926 tg3_flag_set(tp, ENABLE_TSS);
9927 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9934 static void tg3_ints_init(struct tg3 *tp)
9936 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9937 !tg3_flag(tp, TAGGED_STATUS)) {
9938 /* All MSI supporting chips should support tagged
9939 * status. Assert that this is the case.
9941 netdev_warn(tp->dev,
9942 "MSI without TAGGED_STATUS? Not using MSI\n");
9946 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9947 tg3_flag_set(tp, USING_MSIX);
9948 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9949 tg3_flag_set(tp, USING_MSI);
9951 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9952 u32 msi_mode = tr32(MSGINT_MODE);
9953 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9954 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9955 if (!tg3_flag(tp, 1SHOT_MSI))
9956 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9957 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9960 if (!tg3_flag(tp, USING_MSIX)) {
9962 tp->napi[0].irq_vec = tp->pdev->irq;
9963 netif_set_real_num_tx_queues(tp->dev, 1);
9964 netif_set_real_num_rx_queues(tp->dev, 1);
9968 static void tg3_ints_fini(struct tg3 *tp)
9970 if (tg3_flag(tp, USING_MSIX))
9971 pci_disable_msix(tp->pdev);
9972 else if (tg3_flag(tp, USING_MSI))
9973 pci_disable_msi(tp->pdev);
9974 tg3_flag_clear(tp, USING_MSI);
9975 tg3_flag_clear(tp, USING_MSIX);
9976 tg3_flag_clear(tp, ENABLE_RSS);
9977 tg3_flag_clear(tp, ENABLE_TSS);
9980 static int tg3_open(struct net_device *dev)
9982 struct tg3 *tp = netdev_priv(dev);
9985 if (tp->fw_needed) {
9986 err = tg3_request_firmware(tp);
9987 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9991 netdev_warn(tp->dev, "TSO capability disabled\n");
9992 tg3_flag_clear(tp, TSO_CAPABLE);
9993 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9994 netdev_notice(tp->dev, "TSO capability restored\n");
9995 tg3_flag_set(tp, TSO_CAPABLE);
9999 netif_carrier_off(tp->dev);
10001 err = tg3_power_up(tp);
10005 tg3_full_lock(tp, 0);
10007 tg3_disable_ints(tp);
10008 tg3_flag_clear(tp, INIT_COMPLETE);
10010 tg3_full_unlock(tp);
10013 * Setup interrupts first so we know how
10014 * many NAPI resources to allocate
10018 tg3_rss_check_indir_tbl(tp);
10020 /* The placement of this call is tied
10021 * to the setup and use of Host TX descriptors.
10023 err = tg3_alloc_consistent(tp);
10029 tg3_napi_enable(tp);
10031 for (i = 0; i < tp->irq_cnt; i++) {
10032 struct tg3_napi *tnapi = &tp->napi[i];
10033 err = tg3_request_irq(tp, i);
10035 for (i--; i >= 0; i--) {
10036 tnapi = &tp->napi[i];
10037 free_irq(tnapi->irq_vec, tnapi);
10043 tg3_full_lock(tp, 0);
10045 err = tg3_init_hw(tp, 1);
10047 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10048 tg3_free_rings(tp);
10051 tg3_full_unlock(tp);
10056 if (tg3_flag(tp, USING_MSI)) {
10057 err = tg3_test_msi(tp);
10060 tg3_full_lock(tp, 0);
10061 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10062 tg3_free_rings(tp);
10063 tg3_full_unlock(tp);
10068 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10069 u32 val = tr32(PCIE_TRANSACTION_CFG);
10071 tw32(PCIE_TRANSACTION_CFG,
10072 val | PCIE_TRANS_CFG_1SHOT_MSI);
10078 tg3_full_lock(tp, 0);
10080 tg3_timer_start(tp);
10081 tg3_flag_set(tp, INIT_COMPLETE);
10082 tg3_enable_ints(tp);
10084 tg3_full_unlock(tp);
10086 netif_tx_start_all_queues(dev);
10089 * Reset loopback feature if it was turned on while the device was down
10090 * make sure that it's installed properly now.
10092 if (dev->features & NETIF_F_LOOPBACK)
10093 tg3_set_loopback(dev, dev->features);
10098 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10099 struct tg3_napi *tnapi = &tp->napi[i];
10100 free_irq(tnapi->irq_vec, tnapi);
10104 tg3_napi_disable(tp);
10106 tg3_free_consistent(tp);
10110 tg3_frob_aux_power(tp, false);
10111 pci_set_power_state(tp->pdev, PCI_D3hot);
10115 static int tg3_close(struct net_device *dev)
10118 struct tg3 *tp = netdev_priv(dev);
10120 tg3_napi_disable(tp);
10121 tg3_reset_task_cancel(tp);
10123 netif_tx_stop_all_queues(dev);
10125 tg3_timer_stop(tp);
10129 tg3_full_lock(tp, 1);
10131 tg3_disable_ints(tp);
10133 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10134 tg3_free_rings(tp);
10135 tg3_flag_clear(tp, INIT_COMPLETE);
10137 tg3_full_unlock(tp);
10139 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10140 struct tg3_napi *tnapi = &tp->napi[i];
10141 free_irq(tnapi->irq_vec, tnapi);
10146 /* Clear stats across close / open calls */
10147 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10148 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10152 tg3_free_consistent(tp);
10154 tg3_power_down(tp);
10156 netif_carrier_off(tp->dev);
10161 static inline u64 get_stat64(tg3_stat64_t *val)
10163 return ((u64)val->high << 32) | ((u64)val->low);
10166 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10168 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10170 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10171 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10172 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10175 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10176 tg3_writephy(tp, MII_TG3_TEST1,
10177 val | MII_TG3_TEST1_CRC_EN);
10178 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10182 tp->phy_crc_errors += val;
10184 return tp->phy_crc_errors;
10187 return get_stat64(&hw_stats->rx_fcs_errors);
10190 #define ESTAT_ADD(member) \
10191 estats->member = old_estats->member + \
10192 get_stat64(&hw_stats->member)
10194 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10196 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10197 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10199 ESTAT_ADD(rx_octets);
10200 ESTAT_ADD(rx_fragments);
10201 ESTAT_ADD(rx_ucast_packets);
10202 ESTAT_ADD(rx_mcast_packets);
10203 ESTAT_ADD(rx_bcast_packets);
10204 ESTAT_ADD(rx_fcs_errors);
10205 ESTAT_ADD(rx_align_errors);
10206 ESTAT_ADD(rx_xon_pause_rcvd);
10207 ESTAT_ADD(rx_xoff_pause_rcvd);
10208 ESTAT_ADD(rx_mac_ctrl_rcvd);
10209 ESTAT_ADD(rx_xoff_entered);
10210 ESTAT_ADD(rx_frame_too_long_errors);
10211 ESTAT_ADD(rx_jabbers);
10212 ESTAT_ADD(rx_undersize_packets);
10213 ESTAT_ADD(rx_in_length_errors);
10214 ESTAT_ADD(rx_out_length_errors);
10215 ESTAT_ADD(rx_64_or_less_octet_packets);
10216 ESTAT_ADD(rx_65_to_127_octet_packets);
10217 ESTAT_ADD(rx_128_to_255_octet_packets);
10218 ESTAT_ADD(rx_256_to_511_octet_packets);
10219 ESTAT_ADD(rx_512_to_1023_octet_packets);
10220 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10221 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10222 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10223 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10224 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10226 ESTAT_ADD(tx_octets);
10227 ESTAT_ADD(tx_collisions);
10228 ESTAT_ADD(tx_xon_sent);
10229 ESTAT_ADD(tx_xoff_sent);
10230 ESTAT_ADD(tx_flow_control);
10231 ESTAT_ADD(tx_mac_errors);
10232 ESTAT_ADD(tx_single_collisions);
10233 ESTAT_ADD(tx_mult_collisions);
10234 ESTAT_ADD(tx_deferred);
10235 ESTAT_ADD(tx_excessive_collisions);
10236 ESTAT_ADD(tx_late_collisions);
10237 ESTAT_ADD(tx_collide_2times);
10238 ESTAT_ADD(tx_collide_3times);
10239 ESTAT_ADD(tx_collide_4times);
10240 ESTAT_ADD(tx_collide_5times);
10241 ESTAT_ADD(tx_collide_6times);
10242 ESTAT_ADD(tx_collide_7times);
10243 ESTAT_ADD(tx_collide_8times);
10244 ESTAT_ADD(tx_collide_9times);
10245 ESTAT_ADD(tx_collide_10times);
10246 ESTAT_ADD(tx_collide_11times);
10247 ESTAT_ADD(tx_collide_12times);
10248 ESTAT_ADD(tx_collide_13times);
10249 ESTAT_ADD(tx_collide_14times);
10250 ESTAT_ADD(tx_collide_15times);
10251 ESTAT_ADD(tx_ucast_packets);
10252 ESTAT_ADD(tx_mcast_packets);
10253 ESTAT_ADD(tx_bcast_packets);
10254 ESTAT_ADD(tx_carrier_sense_errors);
10255 ESTAT_ADD(tx_discards);
10256 ESTAT_ADD(tx_errors);
10258 ESTAT_ADD(dma_writeq_full);
10259 ESTAT_ADD(dma_write_prioq_full);
10260 ESTAT_ADD(rxbds_empty);
10261 ESTAT_ADD(rx_discards);
10262 ESTAT_ADD(rx_errors);
10263 ESTAT_ADD(rx_threshold_hit);
10265 ESTAT_ADD(dma_readq_full);
10266 ESTAT_ADD(dma_read_prioq_full);
10267 ESTAT_ADD(tx_comp_queue_full);
10269 ESTAT_ADD(ring_set_send_prod_index);
10270 ESTAT_ADD(ring_status_update);
10271 ESTAT_ADD(nic_irqs);
10272 ESTAT_ADD(nic_avoided_irqs);
10273 ESTAT_ADD(nic_tx_threshold_hit);
10275 ESTAT_ADD(mbuf_lwm_thresh_hit);
10278 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10280 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10281 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10283 stats->rx_packets = old_stats->rx_packets +
10284 get_stat64(&hw_stats->rx_ucast_packets) +
10285 get_stat64(&hw_stats->rx_mcast_packets) +
10286 get_stat64(&hw_stats->rx_bcast_packets);
10288 stats->tx_packets = old_stats->tx_packets +
10289 get_stat64(&hw_stats->tx_ucast_packets) +
10290 get_stat64(&hw_stats->tx_mcast_packets) +
10291 get_stat64(&hw_stats->tx_bcast_packets);
10293 stats->rx_bytes = old_stats->rx_bytes +
10294 get_stat64(&hw_stats->rx_octets);
10295 stats->tx_bytes = old_stats->tx_bytes +
10296 get_stat64(&hw_stats->tx_octets);
10298 stats->rx_errors = old_stats->rx_errors +
10299 get_stat64(&hw_stats->rx_errors);
10300 stats->tx_errors = old_stats->tx_errors +
10301 get_stat64(&hw_stats->tx_errors) +
10302 get_stat64(&hw_stats->tx_mac_errors) +
10303 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10304 get_stat64(&hw_stats->tx_discards);
10306 stats->multicast = old_stats->multicast +
10307 get_stat64(&hw_stats->rx_mcast_packets);
10308 stats->collisions = old_stats->collisions +
10309 get_stat64(&hw_stats->tx_collisions);
10311 stats->rx_length_errors = old_stats->rx_length_errors +
10312 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10313 get_stat64(&hw_stats->rx_undersize_packets);
10315 stats->rx_over_errors = old_stats->rx_over_errors +
10316 get_stat64(&hw_stats->rxbds_empty);
10317 stats->rx_frame_errors = old_stats->rx_frame_errors +
10318 get_stat64(&hw_stats->rx_align_errors);
10319 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10320 get_stat64(&hw_stats->tx_discards);
10321 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10322 get_stat64(&hw_stats->tx_carrier_sense_errors);
10324 stats->rx_crc_errors = old_stats->rx_crc_errors +
10325 tg3_calc_crc_errors(tp);
10327 stats->rx_missed_errors = old_stats->rx_missed_errors +
10328 get_stat64(&hw_stats->rx_discards);
10330 stats->rx_dropped = tp->rx_dropped;
10331 stats->tx_dropped = tp->tx_dropped;
10334 static int tg3_get_regs_len(struct net_device *dev)
10336 return TG3_REG_BLK_SIZE;
10339 static void tg3_get_regs(struct net_device *dev,
10340 struct ethtool_regs *regs, void *_p)
10342 struct tg3 *tp = netdev_priv(dev);
10346 memset(_p, 0, TG3_REG_BLK_SIZE);
10348 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10351 tg3_full_lock(tp, 0);
10353 tg3_dump_legacy_regs(tp, (u32 *)_p);
10355 tg3_full_unlock(tp);
10358 static int tg3_get_eeprom_len(struct net_device *dev)
10360 struct tg3 *tp = netdev_priv(dev);
10362 return tp->nvram_size;
10365 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10367 struct tg3 *tp = netdev_priv(dev);
10370 u32 i, offset, len, b_offset, b_count;
10373 if (tg3_flag(tp, NO_NVRAM))
10376 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10379 offset = eeprom->offset;
10383 eeprom->magic = TG3_EEPROM_MAGIC;
10386 /* adjustments to start on required 4 byte boundary */
10387 b_offset = offset & 3;
10388 b_count = 4 - b_offset;
10389 if (b_count > len) {
10390 /* i.e. offset=1 len=2 */
10393 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10396 memcpy(data, ((char *)&val) + b_offset, b_count);
10399 eeprom->len += b_count;
10402 /* read bytes up to the last 4 byte boundary */
10403 pd = &data[eeprom->len];
10404 for (i = 0; i < (len - (len & 3)); i += 4) {
10405 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10410 memcpy(pd + i, &val, 4);
10415 /* read last bytes not ending on 4 byte boundary */
10416 pd = &data[eeprom->len];
10418 b_offset = offset + len - b_count;
10419 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10422 memcpy(pd, &val, b_count);
10423 eeprom->len += b_count;
10428 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10430 struct tg3 *tp = netdev_priv(dev);
10432 u32 offset, len, b_offset, odd_len;
10436 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10439 if (tg3_flag(tp, NO_NVRAM) ||
10440 eeprom->magic != TG3_EEPROM_MAGIC)
10443 offset = eeprom->offset;
10446 if ((b_offset = (offset & 3))) {
10447 /* adjustments to start on required 4 byte boundary */
10448 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10459 /* adjustments to end on required 4 byte boundary */
10461 len = (len + 3) & ~3;
10462 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10468 if (b_offset || odd_len) {
10469 buf = kmalloc(len, GFP_KERNEL);
10473 memcpy(buf, &start, 4);
10475 memcpy(buf+len-4, &end, 4);
10476 memcpy(buf + b_offset, data, eeprom->len);
10479 ret = tg3_nvram_write_block(tp, offset, len, buf);
10487 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10489 struct tg3 *tp = netdev_priv(dev);
10491 if (tg3_flag(tp, USE_PHYLIB)) {
10492 struct phy_device *phydev;
10493 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10495 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10496 return phy_ethtool_gset(phydev, cmd);
10499 cmd->supported = (SUPPORTED_Autoneg);
10501 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10502 cmd->supported |= (SUPPORTED_1000baseT_Half |
10503 SUPPORTED_1000baseT_Full);
10505 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10506 cmd->supported |= (SUPPORTED_100baseT_Half |
10507 SUPPORTED_100baseT_Full |
10508 SUPPORTED_10baseT_Half |
10509 SUPPORTED_10baseT_Full |
10511 cmd->port = PORT_TP;
10513 cmd->supported |= SUPPORTED_FIBRE;
10514 cmd->port = PORT_FIBRE;
10517 cmd->advertising = tp->link_config.advertising;
10518 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10519 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10520 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10521 cmd->advertising |= ADVERTISED_Pause;
10523 cmd->advertising |= ADVERTISED_Pause |
10524 ADVERTISED_Asym_Pause;
10526 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10527 cmd->advertising |= ADVERTISED_Asym_Pause;
10530 if (netif_running(dev) && netif_carrier_ok(dev)) {
10531 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10532 cmd->duplex = tp->link_config.active_duplex;
10533 cmd->lp_advertising = tp->link_config.rmt_adv;
10534 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10535 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10536 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10538 cmd->eth_tp_mdix = ETH_TP_MDI;
10541 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10542 cmd->duplex = DUPLEX_UNKNOWN;
10543 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10545 cmd->phy_address = tp->phy_addr;
10546 cmd->transceiver = XCVR_INTERNAL;
10547 cmd->autoneg = tp->link_config.autoneg;
10553 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10555 struct tg3 *tp = netdev_priv(dev);
10556 u32 speed = ethtool_cmd_speed(cmd);
10558 if (tg3_flag(tp, USE_PHYLIB)) {
10559 struct phy_device *phydev;
10560 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10562 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10563 return phy_ethtool_sset(phydev, cmd);
10566 if (cmd->autoneg != AUTONEG_ENABLE &&
10567 cmd->autoneg != AUTONEG_DISABLE)
10570 if (cmd->autoneg == AUTONEG_DISABLE &&
10571 cmd->duplex != DUPLEX_FULL &&
10572 cmd->duplex != DUPLEX_HALF)
10575 if (cmd->autoneg == AUTONEG_ENABLE) {
10576 u32 mask = ADVERTISED_Autoneg |
10578 ADVERTISED_Asym_Pause;
10580 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10581 mask |= ADVERTISED_1000baseT_Half |
10582 ADVERTISED_1000baseT_Full;
10584 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10585 mask |= ADVERTISED_100baseT_Half |
10586 ADVERTISED_100baseT_Full |
10587 ADVERTISED_10baseT_Half |
10588 ADVERTISED_10baseT_Full |
10591 mask |= ADVERTISED_FIBRE;
10593 if (cmd->advertising & ~mask)
10596 mask &= (ADVERTISED_1000baseT_Half |
10597 ADVERTISED_1000baseT_Full |
10598 ADVERTISED_100baseT_Half |
10599 ADVERTISED_100baseT_Full |
10600 ADVERTISED_10baseT_Half |
10601 ADVERTISED_10baseT_Full);
10603 cmd->advertising &= mask;
10605 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10606 if (speed != SPEED_1000)
10609 if (cmd->duplex != DUPLEX_FULL)
10612 if (speed != SPEED_100 &&
10618 tg3_full_lock(tp, 0);
10620 tp->link_config.autoneg = cmd->autoneg;
10621 if (cmd->autoneg == AUTONEG_ENABLE) {
10622 tp->link_config.advertising = (cmd->advertising |
10623 ADVERTISED_Autoneg);
10624 tp->link_config.speed = SPEED_UNKNOWN;
10625 tp->link_config.duplex = DUPLEX_UNKNOWN;
10627 tp->link_config.advertising = 0;
10628 tp->link_config.speed = speed;
10629 tp->link_config.duplex = cmd->duplex;
10632 if (netif_running(dev))
10633 tg3_setup_phy(tp, 1);
10635 tg3_full_unlock(tp);
10640 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10642 struct tg3 *tp = netdev_priv(dev);
10644 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10645 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10646 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10647 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10650 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10652 struct tg3 *tp = netdev_priv(dev);
10654 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10655 wol->supported = WAKE_MAGIC;
10657 wol->supported = 0;
10659 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10660 wol->wolopts = WAKE_MAGIC;
10661 memset(&wol->sopass, 0, sizeof(wol->sopass));
10664 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10666 struct tg3 *tp = netdev_priv(dev);
10667 struct device *dp = &tp->pdev->dev;
10669 if (wol->wolopts & ~WAKE_MAGIC)
10671 if ((wol->wolopts & WAKE_MAGIC) &&
10672 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10675 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10677 spin_lock_bh(&tp->lock);
10678 if (device_may_wakeup(dp))
10679 tg3_flag_set(tp, WOL_ENABLE);
10681 tg3_flag_clear(tp, WOL_ENABLE);
10682 spin_unlock_bh(&tp->lock);
10687 static u32 tg3_get_msglevel(struct net_device *dev)
10689 struct tg3 *tp = netdev_priv(dev);
10690 return tp->msg_enable;
10693 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10695 struct tg3 *tp = netdev_priv(dev);
10696 tp->msg_enable = value;
10699 static int tg3_nway_reset(struct net_device *dev)
10701 struct tg3 *tp = netdev_priv(dev);
10704 if (!netif_running(dev))
10707 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10710 if (tg3_flag(tp, USE_PHYLIB)) {
10711 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10713 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10717 spin_lock_bh(&tp->lock);
10719 tg3_readphy(tp, MII_BMCR, &bmcr);
10720 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10721 ((bmcr & BMCR_ANENABLE) ||
10722 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10723 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10727 spin_unlock_bh(&tp->lock);
10733 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10735 struct tg3 *tp = netdev_priv(dev);
10737 ering->rx_max_pending = tp->rx_std_ring_mask;
10738 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10739 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10741 ering->rx_jumbo_max_pending = 0;
10743 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10745 ering->rx_pending = tp->rx_pending;
10746 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10747 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10749 ering->rx_jumbo_pending = 0;
10751 ering->tx_pending = tp->napi[0].tx_pending;
10754 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10756 struct tg3 *tp = netdev_priv(dev);
10757 int i, irq_sync = 0, err = 0;
10759 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10760 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10761 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10762 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10763 (tg3_flag(tp, TSO_BUG) &&
10764 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10767 if (netif_running(dev)) {
10769 tg3_netif_stop(tp);
10773 tg3_full_lock(tp, irq_sync);
10775 tp->rx_pending = ering->rx_pending;
10777 if (tg3_flag(tp, MAX_RXPEND_64) &&
10778 tp->rx_pending > 63)
10779 tp->rx_pending = 63;
10780 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10782 for (i = 0; i < tp->irq_max; i++)
10783 tp->napi[i].tx_pending = ering->tx_pending;
10785 if (netif_running(dev)) {
10786 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10787 err = tg3_restart_hw(tp, 1);
10789 tg3_netif_start(tp);
10792 tg3_full_unlock(tp);
10794 if (irq_sync && !err)
10800 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10802 struct tg3 *tp = netdev_priv(dev);
10804 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10806 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10807 epause->rx_pause = 1;
10809 epause->rx_pause = 0;
10811 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10812 epause->tx_pause = 1;
10814 epause->tx_pause = 0;
10817 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10819 struct tg3 *tp = netdev_priv(dev);
10822 if (tg3_flag(tp, USE_PHYLIB)) {
10824 struct phy_device *phydev;
10826 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10828 if (!(phydev->supported & SUPPORTED_Pause) ||
10829 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10830 (epause->rx_pause != epause->tx_pause)))
10833 tp->link_config.flowctrl = 0;
10834 if (epause->rx_pause) {
10835 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10837 if (epause->tx_pause) {
10838 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10839 newadv = ADVERTISED_Pause;
10841 newadv = ADVERTISED_Pause |
10842 ADVERTISED_Asym_Pause;
10843 } else if (epause->tx_pause) {
10844 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10845 newadv = ADVERTISED_Asym_Pause;
10849 if (epause->autoneg)
10850 tg3_flag_set(tp, PAUSE_AUTONEG);
10852 tg3_flag_clear(tp, PAUSE_AUTONEG);
10854 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10855 u32 oldadv = phydev->advertising &
10856 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10857 if (oldadv != newadv) {
10858 phydev->advertising &=
10859 ~(ADVERTISED_Pause |
10860 ADVERTISED_Asym_Pause);
10861 phydev->advertising |= newadv;
10862 if (phydev->autoneg) {
10864 * Always renegotiate the link to
10865 * inform our link partner of our
10866 * flow control settings, even if the
10867 * flow control is forced. Let
10868 * tg3_adjust_link() do the final
10869 * flow control setup.
10871 return phy_start_aneg(phydev);
10875 if (!epause->autoneg)
10876 tg3_setup_flow_control(tp, 0, 0);
10878 tp->link_config.advertising &=
10879 ~(ADVERTISED_Pause |
10880 ADVERTISED_Asym_Pause);
10881 tp->link_config.advertising |= newadv;
10886 if (netif_running(dev)) {
10887 tg3_netif_stop(tp);
10891 tg3_full_lock(tp, irq_sync);
10893 if (epause->autoneg)
10894 tg3_flag_set(tp, PAUSE_AUTONEG);
10896 tg3_flag_clear(tp, PAUSE_AUTONEG);
10897 if (epause->rx_pause)
10898 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10900 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10901 if (epause->tx_pause)
10902 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10904 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10906 if (netif_running(dev)) {
10907 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10908 err = tg3_restart_hw(tp, 1);
10910 tg3_netif_start(tp);
10913 tg3_full_unlock(tp);
10919 static int tg3_get_sset_count(struct net_device *dev, int sset)
10923 return TG3_NUM_TEST;
10925 return TG3_NUM_STATS;
10927 return -EOPNOTSUPP;
10931 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10932 u32 *rules __always_unused)
10934 struct tg3 *tp = netdev_priv(dev);
10936 if (!tg3_flag(tp, SUPPORT_MSIX))
10937 return -EOPNOTSUPP;
10939 switch (info->cmd) {
10940 case ETHTOOL_GRXRINGS:
10941 if (netif_running(tp->dev))
10942 info->data = tp->irq_cnt;
10944 info->data = num_online_cpus();
10945 if (info->data > TG3_IRQ_MAX_VECS_RSS)
10946 info->data = TG3_IRQ_MAX_VECS_RSS;
10949 /* The first interrupt vector only
10950 * handles link interrupts.
10956 return -EOPNOTSUPP;
10960 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10963 struct tg3 *tp = netdev_priv(dev);
10965 if (tg3_flag(tp, SUPPORT_MSIX))
10966 size = TG3_RSS_INDIR_TBL_SIZE;
10971 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10973 struct tg3 *tp = netdev_priv(dev);
10976 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10977 indir[i] = tp->rss_ind_tbl[i];
10982 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10984 struct tg3 *tp = netdev_priv(dev);
10987 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10988 tp->rss_ind_tbl[i] = indir[i];
10990 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10993 /* It is legal to write the indirection
10994 * table while the device is running.
10996 tg3_full_lock(tp, 0);
10997 tg3_rss_write_indir_tbl(tp);
10998 tg3_full_unlock(tp);
11003 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11005 switch (stringset) {
11007 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
11010 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
11013 WARN_ON(1); /* we need a WARN() */
11018 static int tg3_set_phys_id(struct net_device *dev,
11019 enum ethtool_phys_id_state state)
11021 struct tg3 *tp = netdev_priv(dev);
11023 if (!netif_running(tp->dev))
11027 case ETHTOOL_ID_ACTIVE:
11028 return 1; /* cycle on/off once per second */
11030 case ETHTOOL_ID_ON:
11031 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11032 LED_CTRL_1000MBPS_ON |
11033 LED_CTRL_100MBPS_ON |
11034 LED_CTRL_10MBPS_ON |
11035 LED_CTRL_TRAFFIC_OVERRIDE |
11036 LED_CTRL_TRAFFIC_BLINK |
11037 LED_CTRL_TRAFFIC_LED);
11040 case ETHTOOL_ID_OFF:
11041 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11042 LED_CTRL_TRAFFIC_OVERRIDE);
11045 case ETHTOOL_ID_INACTIVE:
11046 tw32(MAC_LED_CTRL, tp->led_ctrl);
11053 static void tg3_get_ethtool_stats(struct net_device *dev,
11054 struct ethtool_stats *estats, u64 *tmp_stats)
11056 struct tg3 *tp = netdev_priv(dev);
11059 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11061 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11064 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11068 u32 offset = 0, len = 0;
11071 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11074 if (magic == TG3_EEPROM_MAGIC) {
11075 for (offset = TG3_NVM_DIR_START;
11076 offset < TG3_NVM_DIR_END;
11077 offset += TG3_NVM_DIRENT_SIZE) {
11078 if (tg3_nvram_read(tp, offset, &val))
11081 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11082 TG3_NVM_DIRTYPE_EXTVPD)
11086 if (offset != TG3_NVM_DIR_END) {
11087 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11088 if (tg3_nvram_read(tp, offset + 4, &offset))
11091 offset = tg3_nvram_logical_addr(tp, offset);
11095 if (!offset || !len) {
11096 offset = TG3_NVM_VPD_OFF;
11097 len = TG3_NVM_VPD_LEN;
11100 buf = kmalloc(len, GFP_KERNEL);
11104 if (magic == TG3_EEPROM_MAGIC) {
11105 for (i = 0; i < len; i += 4) {
11106 /* The data is in little-endian format in NVRAM.
11107 * Use the big-endian read routines to preserve
11108 * the byte order as it exists in NVRAM.
11110 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11116 unsigned int pos = 0;
11118 ptr = (u8 *)&buf[0];
11119 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11120 cnt = pci_read_vpd(tp->pdev, pos,
11122 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11140 #define NVRAM_TEST_SIZE 0x100
11141 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11142 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11143 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11144 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11145 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11146 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11147 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11148 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11150 static int tg3_test_nvram(struct tg3 *tp)
11152 u32 csum, magic, len;
11154 int i, j, k, err = 0, size;
11156 if (tg3_flag(tp, NO_NVRAM))
11159 if (tg3_nvram_read(tp, 0, &magic) != 0)
11162 if (magic == TG3_EEPROM_MAGIC)
11163 size = NVRAM_TEST_SIZE;
11164 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11165 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11166 TG3_EEPROM_SB_FORMAT_1) {
11167 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11168 case TG3_EEPROM_SB_REVISION_0:
11169 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11171 case TG3_EEPROM_SB_REVISION_2:
11172 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11174 case TG3_EEPROM_SB_REVISION_3:
11175 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11177 case TG3_EEPROM_SB_REVISION_4:
11178 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11180 case TG3_EEPROM_SB_REVISION_5:
11181 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11183 case TG3_EEPROM_SB_REVISION_6:
11184 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11191 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11192 size = NVRAM_SELFBOOT_HW_SIZE;
11196 buf = kmalloc(size, GFP_KERNEL);
11201 for (i = 0, j = 0; i < size; i += 4, j++) {
11202 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11209 /* Selfboot format */
11210 magic = be32_to_cpu(buf[0]);
11211 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11212 TG3_EEPROM_MAGIC_FW) {
11213 u8 *buf8 = (u8 *) buf, csum8 = 0;
11215 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11216 TG3_EEPROM_SB_REVISION_2) {
11217 /* For rev 2, the csum doesn't include the MBA. */
11218 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11220 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11223 for (i = 0; i < size; i++)
11236 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11237 TG3_EEPROM_MAGIC_HW) {
11238 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11239 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11240 u8 *buf8 = (u8 *) buf;
11242 /* Separate the parity bits and the data bytes. */
11243 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11244 if ((i == 0) || (i == 8)) {
11248 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11249 parity[k++] = buf8[i] & msk;
11251 } else if (i == 16) {
11255 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11256 parity[k++] = buf8[i] & msk;
11259 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11260 parity[k++] = buf8[i] & msk;
11263 data[j++] = buf8[i];
11267 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11268 u8 hw8 = hweight8(data[i]);
11270 if ((hw8 & 0x1) && parity[i])
11272 else if (!(hw8 & 0x1) && !parity[i])
11281 /* Bootstrap checksum at offset 0x10 */
11282 csum = calc_crc((unsigned char *) buf, 0x10);
11283 if (csum != le32_to_cpu(buf[0x10/4]))
11286 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11287 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11288 if (csum != le32_to_cpu(buf[0xfc/4]))
11293 buf = tg3_vpd_readblock(tp, &len);
11297 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11299 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11303 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11306 i += PCI_VPD_LRDT_TAG_SIZE;
11307 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11308 PCI_VPD_RO_KEYWORD_CHKSUM);
11312 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11314 for (i = 0; i <= j; i++)
11315 csum8 += ((u8 *)buf)[i];
11329 #define TG3_SERDES_TIMEOUT_SEC 2
11330 #define TG3_COPPER_TIMEOUT_SEC 6
11332 static int tg3_test_link(struct tg3 *tp)
11336 if (!netif_running(tp->dev))
11339 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11340 max = TG3_SERDES_TIMEOUT_SEC;
11342 max = TG3_COPPER_TIMEOUT_SEC;
11344 for (i = 0; i < max; i++) {
11345 if (netif_carrier_ok(tp->dev))
11348 if (msleep_interruptible(1000))
11355 /* Only test the commonly used registers */
11356 static int tg3_test_registers(struct tg3 *tp)
11358 int i, is_5705, is_5750;
11359 u32 offset, read_mask, write_mask, val, save_val, read_val;
11363 #define TG3_FL_5705 0x1
11364 #define TG3_FL_NOT_5705 0x2
11365 #define TG3_FL_NOT_5788 0x4
11366 #define TG3_FL_NOT_5750 0x8
11370 /* MAC Control Registers */
11371 { MAC_MODE, TG3_FL_NOT_5705,
11372 0x00000000, 0x00ef6f8c },
11373 { MAC_MODE, TG3_FL_5705,
11374 0x00000000, 0x01ef6b8c },
11375 { MAC_STATUS, TG3_FL_NOT_5705,
11376 0x03800107, 0x00000000 },
11377 { MAC_STATUS, TG3_FL_5705,
11378 0x03800100, 0x00000000 },
11379 { MAC_ADDR_0_HIGH, 0x0000,
11380 0x00000000, 0x0000ffff },
11381 { MAC_ADDR_0_LOW, 0x0000,
11382 0x00000000, 0xffffffff },
11383 { MAC_RX_MTU_SIZE, 0x0000,
11384 0x00000000, 0x0000ffff },
11385 { MAC_TX_MODE, 0x0000,
11386 0x00000000, 0x00000070 },
11387 { MAC_TX_LENGTHS, 0x0000,
11388 0x00000000, 0x00003fff },
11389 { MAC_RX_MODE, TG3_FL_NOT_5705,
11390 0x00000000, 0x000007fc },
11391 { MAC_RX_MODE, TG3_FL_5705,
11392 0x00000000, 0x000007dc },
11393 { MAC_HASH_REG_0, 0x0000,
11394 0x00000000, 0xffffffff },
11395 { MAC_HASH_REG_1, 0x0000,
11396 0x00000000, 0xffffffff },
11397 { MAC_HASH_REG_2, 0x0000,
11398 0x00000000, 0xffffffff },
11399 { MAC_HASH_REG_3, 0x0000,
11400 0x00000000, 0xffffffff },
11402 /* Receive Data and Receive BD Initiator Control Registers. */
11403 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11404 0x00000000, 0xffffffff },
11405 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11406 0x00000000, 0xffffffff },
11407 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11408 0x00000000, 0x00000003 },
11409 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11410 0x00000000, 0xffffffff },
11411 { RCVDBDI_STD_BD+0, 0x0000,
11412 0x00000000, 0xffffffff },
11413 { RCVDBDI_STD_BD+4, 0x0000,
11414 0x00000000, 0xffffffff },
11415 { RCVDBDI_STD_BD+8, 0x0000,
11416 0x00000000, 0xffff0002 },
11417 { RCVDBDI_STD_BD+0xc, 0x0000,
11418 0x00000000, 0xffffffff },
11420 /* Receive BD Initiator Control Registers. */
11421 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11422 0x00000000, 0xffffffff },
11423 { RCVBDI_STD_THRESH, TG3_FL_5705,
11424 0x00000000, 0x000003ff },
11425 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11426 0x00000000, 0xffffffff },
11428 /* Host Coalescing Control Registers. */
11429 { HOSTCC_MODE, TG3_FL_NOT_5705,
11430 0x00000000, 0x00000004 },
11431 { HOSTCC_MODE, TG3_FL_5705,
11432 0x00000000, 0x000000f6 },
11433 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11434 0x00000000, 0xffffffff },
11435 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11436 0x00000000, 0x000003ff },
11437 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11438 0x00000000, 0xffffffff },
11439 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11440 0x00000000, 0x000003ff },
11441 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11442 0x00000000, 0xffffffff },
11443 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11444 0x00000000, 0x000000ff },
11445 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11446 0x00000000, 0xffffffff },
11447 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11448 0x00000000, 0x000000ff },
11449 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11450 0x00000000, 0xffffffff },
11451 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11452 0x00000000, 0xffffffff },
11453 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11454 0x00000000, 0xffffffff },
11455 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11456 0x00000000, 0x000000ff },
11457 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11458 0x00000000, 0xffffffff },
11459 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11460 0x00000000, 0x000000ff },
11461 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11462 0x00000000, 0xffffffff },
11463 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11464 0x00000000, 0xffffffff },
11465 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11466 0x00000000, 0xffffffff },
11467 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11468 0x00000000, 0xffffffff },
11469 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11470 0x00000000, 0xffffffff },
11471 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11472 0xffffffff, 0x00000000 },
11473 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11474 0xffffffff, 0x00000000 },
11476 /* Buffer Manager Control Registers. */
11477 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11478 0x00000000, 0x007fff80 },
11479 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11480 0x00000000, 0x007fffff },
11481 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11482 0x00000000, 0x0000003f },
11483 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11484 0x00000000, 0x000001ff },
11485 { BUFMGR_MB_HIGH_WATER, 0x0000,
11486 0x00000000, 0x000001ff },
11487 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11488 0xffffffff, 0x00000000 },
11489 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11490 0xffffffff, 0x00000000 },
11492 /* Mailbox Registers */
11493 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11494 0x00000000, 0x000001ff },
11495 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11496 0x00000000, 0x000001ff },
11497 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11498 0x00000000, 0x000007ff },
11499 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11500 0x00000000, 0x000001ff },
11502 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11505 is_5705 = is_5750 = 0;
11506 if (tg3_flag(tp, 5705_PLUS)) {
11508 if (tg3_flag(tp, 5750_PLUS))
11512 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11513 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11516 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11519 if (tg3_flag(tp, IS_5788) &&
11520 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11523 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11526 offset = (u32) reg_tbl[i].offset;
11527 read_mask = reg_tbl[i].read_mask;
11528 write_mask = reg_tbl[i].write_mask;
11530 /* Save the original register content */
11531 save_val = tr32(offset);
11533 /* Determine the read-only value. */
11534 read_val = save_val & read_mask;
11536 /* Write zero to the register, then make sure the read-only bits
11537 * are not changed and the read/write bits are all zeros.
11541 val = tr32(offset);
11543 /* Test the read-only and read/write bits. */
11544 if (((val & read_mask) != read_val) || (val & write_mask))
11547 /* Write ones to all the bits defined by RdMask and WrMask, then
11548 * make sure the read-only bits are not changed and the
11549 * read/write bits are all ones.
11551 tw32(offset, read_mask | write_mask);
11553 val = tr32(offset);
11555 /* Test the read-only bits. */
11556 if ((val & read_mask) != read_val)
11559 /* Test the read/write bits. */
11560 if ((val & write_mask) != write_mask)
11563 tw32(offset, save_val);
11569 if (netif_msg_hw(tp))
11570 netdev_err(tp->dev,
11571 "Register test failed at offset %x\n", offset);
11572 tw32(offset, save_val);
11576 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11578 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11582 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11583 for (j = 0; j < len; j += 4) {
11586 tg3_write_mem(tp, offset + j, test_pattern[i]);
11587 tg3_read_mem(tp, offset + j, &val);
11588 if (val != test_pattern[i])
11595 static int tg3_test_memory(struct tg3 *tp)
11597 static struct mem_entry {
11600 } mem_tbl_570x[] = {
11601 { 0x00000000, 0x00b50},
11602 { 0x00002000, 0x1c000},
11603 { 0xffffffff, 0x00000}
11604 }, mem_tbl_5705[] = {
11605 { 0x00000100, 0x0000c},
11606 { 0x00000200, 0x00008},
11607 { 0x00004000, 0x00800},
11608 { 0x00006000, 0x01000},
11609 { 0x00008000, 0x02000},
11610 { 0x00010000, 0x0e000},
11611 { 0xffffffff, 0x00000}
11612 }, mem_tbl_5755[] = {
11613 { 0x00000200, 0x00008},
11614 { 0x00004000, 0x00800},
11615 { 0x00006000, 0x00800},
11616 { 0x00008000, 0x02000},
11617 { 0x00010000, 0x0c000},
11618 { 0xffffffff, 0x00000}
11619 }, mem_tbl_5906[] = {
11620 { 0x00000200, 0x00008},
11621 { 0x00004000, 0x00400},
11622 { 0x00006000, 0x00400},
11623 { 0x00008000, 0x01000},
11624 { 0x00010000, 0x01000},
11625 { 0xffffffff, 0x00000}
11626 }, mem_tbl_5717[] = {
11627 { 0x00000200, 0x00008},
11628 { 0x00010000, 0x0a000},
11629 { 0x00020000, 0x13c00},
11630 { 0xffffffff, 0x00000}
11631 }, mem_tbl_57765[] = {
11632 { 0x00000200, 0x00008},
11633 { 0x00004000, 0x00800},
11634 { 0x00006000, 0x09800},
11635 { 0x00010000, 0x0a000},
11636 { 0xffffffff, 0x00000}
11638 struct mem_entry *mem_tbl;
11642 if (tg3_flag(tp, 5717_PLUS))
11643 mem_tbl = mem_tbl_5717;
11644 else if (tg3_flag(tp, 57765_CLASS))
11645 mem_tbl = mem_tbl_57765;
11646 else if (tg3_flag(tp, 5755_PLUS))
11647 mem_tbl = mem_tbl_5755;
11648 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11649 mem_tbl = mem_tbl_5906;
11650 else if (tg3_flag(tp, 5705_PLUS))
11651 mem_tbl = mem_tbl_5705;
11653 mem_tbl = mem_tbl_570x;
11655 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11656 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11664 #define TG3_TSO_MSS 500
11666 #define TG3_TSO_IP_HDR_LEN 20
11667 #define TG3_TSO_TCP_HDR_LEN 20
11668 #define TG3_TSO_TCP_OPT_LEN 12
11670 static const u8 tg3_tso_header[] = {
11672 0x45, 0x00, 0x00, 0x00,
11673 0x00, 0x00, 0x40, 0x00,
11674 0x40, 0x06, 0x00, 0x00,
11675 0x0a, 0x00, 0x00, 0x01,
11676 0x0a, 0x00, 0x00, 0x02,
11677 0x0d, 0x00, 0xe0, 0x00,
11678 0x00, 0x00, 0x01, 0x00,
11679 0x00, 0x00, 0x02, 0x00,
11680 0x80, 0x10, 0x10, 0x00,
11681 0x14, 0x09, 0x00, 0x00,
11682 0x01, 0x01, 0x08, 0x0a,
11683 0x11, 0x11, 0x11, 0x11,
11684 0x11, 0x11, 0x11, 0x11,
11687 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11689 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11690 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11692 struct sk_buff *skb;
11693 u8 *tx_data, *rx_data;
11695 int num_pkts, tx_len, rx_len, i, err;
11696 struct tg3_rx_buffer_desc *desc;
11697 struct tg3_napi *tnapi, *rnapi;
11698 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11700 tnapi = &tp->napi[0];
11701 rnapi = &tp->napi[0];
11702 if (tp->irq_cnt > 1) {
11703 if (tg3_flag(tp, ENABLE_RSS))
11704 rnapi = &tp->napi[1];
11705 if (tg3_flag(tp, ENABLE_TSS))
11706 tnapi = &tp->napi[1];
11708 coal_now = tnapi->coal_now | rnapi->coal_now;
11713 skb = netdev_alloc_skb(tp->dev, tx_len);
11717 tx_data = skb_put(skb, tx_len);
11718 memcpy(tx_data, tp->dev->dev_addr, 6);
11719 memset(tx_data + 6, 0x0, 8);
11721 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11723 if (tso_loopback) {
11724 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11726 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11727 TG3_TSO_TCP_OPT_LEN;
11729 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11730 sizeof(tg3_tso_header));
11733 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11734 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11736 /* Set the total length field in the IP header */
11737 iph->tot_len = htons((u16)(mss + hdr_len));
11739 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11740 TXD_FLAG_CPU_POST_DMA);
11742 if (tg3_flag(tp, HW_TSO_1) ||
11743 tg3_flag(tp, HW_TSO_2) ||
11744 tg3_flag(tp, HW_TSO_3)) {
11746 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11747 th = (struct tcphdr *)&tx_data[val];
11750 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11752 if (tg3_flag(tp, HW_TSO_3)) {
11753 mss |= (hdr_len & 0xc) << 12;
11754 if (hdr_len & 0x10)
11755 base_flags |= 0x00000010;
11756 base_flags |= (hdr_len & 0x3e0) << 5;
11757 } else if (tg3_flag(tp, HW_TSO_2))
11758 mss |= hdr_len << 9;
11759 else if (tg3_flag(tp, HW_TSO_1) ||
11760 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11761 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11763 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11766 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11769 data_off = ETH_HLEN;
11771 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11772 tx_len > VLAN_ETH_FRAME_LEN)
11773 base_flags |= TXD_FLAG_JMB_PKT;
11776 for (i = data_off; i < tx_len; i++)
11777 tx_data[i] = (u8) (i & 0xff);
11779 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11780 if (pci_dma_mapping_error(tp->pdev, map)) {
11781 dev_kfree_skb(skb);
11785 val = tnapi->tx_prod;
11786 tnapi->tx_buffers[val].skb = skb;
11787 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11789 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11794 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11796 budget = tg3_tx_avail(tnapi);
11797 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11798 base_flags | TXD_FLAG_END, mss, 0)) {
11799 tnapi->tx_buffers[val].skb = NULL;
11800 dev_kfree_skb(skb);
11806 /* Sync BD data before updating mailbox */
11809 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11810 tr32_mailbox(tnapi->prodmbox);
11814 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11815 for (i = 0; i < 35; i++) {
11816 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11821 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11822 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11823 if ((tx_idx == tnapi->tx_prod) &&
11824 (rx_idx == (rx_start_idx + num_pkts)))
11828 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11829 dev_kfree_skb(skb);
11831 if (tx_idx != tnapi->tx_prod)
11834 if (rx_idx != rx_start_idx + num_pkts)
11838 while (rx_idx != rx_start_idx) {
11839 desc = &rnapi->rx_rcb[rx_start_idx++];
11840 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11841 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11843 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11844 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11847 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11850 if (!tso_loopback) {
11851 if (rx_len != tx_len)
11854 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11855 if (opaque_key != RXD_OPAQUE_RING_STD)
11858 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11861 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11862 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11863 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11867 if (opaque_key == RXD_OPAQUE_RING_STD) {
11868 rx_data = tpr->rx_std_buffers[desc_idx].data;
11869 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11871 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11872 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11873 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11878 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11879 PCI_DMA_FROMDEVICE);
11881 rx_data += TG3_RX_OFFSET(tp);
11882 for (i = data_off; i < rx_len; i++, val++) {
11883 if (*(rx_data + i) != (u8) (val & 0xff))
11890 /* tg3_free_rings will unmap and free the rx_data */
11895 #define TG3_STD_LOOPBACK_FAILED 1
11896 #define TG3_JMB_LOOPBACK_FAILED 2
11897 #define TG3_TSO_LOOPBACK_FAILED 4
11898 #define TG3_LOOPBACK_FAILED \
11899 (TG3_STD_LOOPBACK_FAILED | \
11900 TG3_JMB_LOOPBACK_FAILED | \
11901 TG3_TSO_LOOPBACK_FAILED)
11903 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11907 u32 jmb_pkt_sz = 9000;
11910 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11912 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11913 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11915 if (!netif_running(tp->dev)) {
11916 data[0] = TG3_LOOPBACK_FAILED;
11917 data[1] = TG3_LOOPBACK_FAILED;
11919 data[2] = TG3_LOOPBACK_FAILED;
11923 err = tg3_reset_hw(tp, 1);
11925 data[0] = TG3_LOOPBACK_FAILED;
11926 data[1] = TG3_LOOPBACK_FAILED;
11928 data[2] = TG3_LOOPBACK_FAILED;
11932 if (tg3_flag(tp, ENABLE_RSS)) {
11935 /* Reroute all rx packets to the 1st queue */
11936 for (i = MAC_RSS_INDIR_TBL_0;
11937 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11941 /* HW errata - mac loopback fails in some cases on 5780.
11942 * Normal traffic and PHY loopback are not affected by
11943 * errata. Also, the MAC loopback test is deprecated for
11944 * all newer ASIC revisions.
11946 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11947 !tg3_flag(tp, CPMU_PRESENT)) {
11948 tg3_mac_loopback(tp, true);
11950 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11951 data[0] |= TG3_STD_LOOPBACK_FAILED;
11953 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11954 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11955 data[0] |= TG3_JMB_LOOPBACK_FAILED;
11957 tg3_mac_loopback(tp, false);
11960 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11961 !tg3_flag(tp, USE_PHYLIB)) {
11964 tg3_phy_lpbk_set(tp, 0, false);
11966 /* Wait for link */
11967 for (i = 0; i < 100; i++) {
11968 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11973 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11974 data[1] |= TG3_STD_LOOPBACK_FAILED;
11975 if (tg3_flag(tp, TSO_CAPABLE) &&
11976 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11977 data[1] |= TG3_TSO_LOOPBACK_FAILED;
11978 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11979 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11980 data[1] |= TG3_JMB_LOOPBACK_FAILED;
11983 tg3_phy_lpbk_set(tp, 0, true);
11985 /* All link indications report up, but the hardware
11986 * isn't really ready for about 20 msec. Double it
11991 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11992 data[2] |= TG3_STD_LOOPBACK_FAILED;
11993 if (tg3_flag(tp, TSO_CAPABLE) &&
11994 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11995 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11996 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11997 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11998 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12001 /* Re-enable gphy autopowerdown. */
12002 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12003 tg3_phy_toggle_apd(tp, true);
12006 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12009 tp->phy_flags |= eee_cap;
12014 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12017 struct tg3 *tp = netdev_priv(dev);
12018 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12020 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12021 tg3_power_up(tp)) {
12022 etest->flags |= ETH_TEST_FL_FAILED;
12023 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12027 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12029 if (tg3_test_nvram(tp) != 0) {
12030 etest->flags |= ETH_TEST_FL_FAILED;
12033 if (!doextlpbk && tg3_test_link(tp)) {
12034 etest->flags |= ETH_TEST_FL_FAILED;
12037 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12038 int err, err2 = 0, irq_sync = 0;
12040 if (netif_running(dev)) {
12042 tg3_netif_stop(tp);
12046 tg3_full_lock(tp, irq_sync);
12048 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12049 err = tg3_nvram_lock(tp);
12050 tg3_halt_cpu(tp, RX_CPU_BASE);
12051 if (!tg3_flag(tp, 5705_PLUS))
12052 tg3_halt_cpu(tp, TX_CPU_BASE);
12054 tg3_nvram_unlock(tp);
12056 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12059 if (tg3_test_registers(tp) != 0) {
12060 etest->flags |= ETH_TEST_FL_FAILED;
12064 if (tg3_test_memory(tp) != 0) {
12065 etest->flags |= ETH_TEST_FL_FAILED;
12070 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12072 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12073 etest->flags |= ETH_TEST_FL_FAILED;
12075 tg3_full_unlock(tp);
12077 if (tg3_test_interrupt(tp) != 0) {
12078 etest->flags |= ETH_TEST_FL_FAILED;
12082 tg3_full_lock(tp, 0);
12084 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12085 if (netif_running(dev)) {
12086 tg3_flag_set(tp, INIT_COMPLETE);
12087 err2 = tg3_restart_hw(tp, 1);
12089 tg3_netif_start(tp);
12092 tg3_full_unlock(tp);
12094 if (irq_sync && !err2)
12097 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12098 tg3_power_down(tp);
12102 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12104 struct mii_ioctl_data *data = if_mii(ifr);
12105 struct tg3 *tp = netdev_priv(dev);
12108 if (tg3_flag(tp, USE_PHYLIB)) {
12109 struct phy_device *phydev;
12110 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12112 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12113 return phy_mii_ioctl(phydev, ifr, cmd);
12118 data->phy_id = tp->phy_addr;
12121 case SIOCGMIIREG: {
12124 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12125 break; /* We have no PHY */
12127 if (!netif_running(dev))
12130 spin_lock_bh(&tp->lock);
12131 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12132 spin_unlock_bh(&tp->lock);
12134 data->val_out = mii_regval;
12140 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12141 break; /* We have no PHY */
12143 if (!netif_running(dev))
12146 spin_lock_bh(&tp->lock);
12147 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12148 spin_unlock_bh(&tp->lock);
12156 return -EOPNOTSUPP;
12159 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12161 struct tg3 *tp = netdev_priv(dev);
12163 memcpy(ec, &tp->coal, sizeof(*ec));
12167 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12169 struct tg3 *tp = netdev_priv(dev);
12170 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12171 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12173 if (!tg3_flag(tp, 5705_PLUS)) {
12174 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12175 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12176 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12177 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12180 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12181 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12182 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12183 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12184 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12185 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12186 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12187 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12188 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12189 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12192 /* No rx interrupts will be generated if both are zero */
12193 if ((ec->rx_coalesce_usecs == 0) &&
12194 (ec->rx_max_coalesced_frames == 0))
12197 /* No tx interrupts will be generated if both are zero */
12198 if ((ec->tx_coalesce_usecs == 0) &&
12199 (ec->tx_max_coalesced_frames == 0))
12202 /* Only copy relevant parameters, ignore all others. */
12203 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12204 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12205 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12206 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12207 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12208 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12209 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12210 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12211 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12213 if (netif_running(dev)) {
12214 tg3_full_lock(tp, 0);
12215 __tg3_set_coalesce(tp, &tp->coal);
12216 tg3_full_unlock(tp);
12221 static const struct ethtool_ops tg3_ethtool_ops = {
12222 .get_settings = tg3_get_settings,
12223 .set_settings = tg3_set_settings,
12224 .get_drvinfo = tg3_get_drvinfo,
12225 .get_regs_len = tg3_get_regs_len,
12226 .get_regs = tg3_get_regs,
12227 .get_wol = tg3_get_wol,
12228 .set_wol = tg3_set_wol,
12229 .get_msglevel = tg3_get_msglevel,
12230 .set_msglevel = tg3_set_msglevel,
12231 .nway_reset = tg3_nway_reset,
12232 .get_link = ethtool_op_get_link,
12233 .get_eeprom_len = tg3_get_eeprom_len,
12234 .get_eeprom = tg3_get_eeprom,
12235 .set_eeprom = tg3_set_eeprom,
12236 .get_ringparam = tg3_get_ringparam,
12237 .set_ringparam = tg3_set_ringparam,
12238 .get_pauseparam = tg3_get_pauseparam,
12239 .set_pauseparam = tg3_set_pauseparam,
12240 .self_test = tg3_self_test,
12241 .get_strings = tg3_get_strings,
12242 .set_phys_id = tg3_set_phys_id,
12243 .get_ethtool_stats = tg3_get_ethtool_stats,
12244 .get_coalesce = tg3_get_coalesce,
12245 .set_coalesce = tg3_set_coalesce,
12246 .get_sset_count = tg3_get_sset_count,
12247 .get_rxnfc = tg3_get_rxnfc,
12248 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12249 .get_rxfh_indir = tg3_get_rxfh_indir,
12250 .set_rxfh_indir = tg3_set_rxfh_indir,
12253 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12254 struct rtnl_link_stats64 *stats)
12256 struct tg3 *tp = netdev_priv(dev);
12259 return &tp->net_stats_prev;
12261 spin_lock_bh(&tp->lock);
12262 tg3_get_nstats(tp, stats);
12263 spin_unlock_bh(&tp->lock);
12268 static void tg3_set_rx_mode(struct net_device *dev)
12270 struct tg3 *tp = netdev_priv(dev);
12272 if (!netif_running(dev))
12275 tg3_full_lock(tp, 0);
12276 __tg3_set_rx_mode(dev);
12277 tg3_full_unlock(tp);
12280 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12283 dev->mtu = new_mtu;
12285 if (new_mtu > ETH_DATA_LEN) {
12286 if (tg3_flag(tp, 5780_CLASS)) {
12287 netdev_update_features(dev);
12288 tg3_flag_clear(tp, TSO_CAPABLE);
12290 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12293 if (tg3_flag(tp, 5780_CLASS)) {
12294 tg3_flag_set(tp, TSO_CAPABLE);
12295 netdev_update_features(dev);
12297 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12301 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12303 struct tg3 *tp = netdev_priv(dev);
12304 int err, reset_phy = 0;
12306 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12309 if (!netif_running(dev)) {
12310 /* We'll just catch it later when the
12313 tg3_set_mtu(dev, tp, new_mtu);
12319 tg3_netif_stop(tp);
12321 tg3_full_lock(tp, 1);
12323 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12325 tg3_set_mtu(dev, tp, new_mtu);
12327 /* Reset PHY, otherwise the read DMA engine will be in a mode that
12328 * breaks all requests to 256 bytes.
12330 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12333 err = tg3_restart_hw(tp, reset_phy);
12336 tg3_netif_start(tp);
12338 tg3_full_unlock(tp);
12346 static const struct net_device_ops tg3_netdev_ops = {
12347 .ndo_open = tg3_open,
12348 .ndo_stop = tg3_close,
12349 .ndo_start_xmit = tg3_start_xmit,
12350 .ndo_get_stats64 = tg3_get_stats64,
12351 .ndo_validate_addr = eth_validate_addr,
12352 .ndo_set_rx_mode = tg3_set_rx_mode,
12353 .ndo_set_mac_address = tg3_set_mac_addr,
12354 .ndo_do_ioctl = tg3_ioctl,
12355 .ndo_tx_timeout = tg3_tx_timeout,
12356 .ndo_change_mtu = tg3_change_mtu,
12357 .ndo_fix_features = tg3_fix_features,
12358 .ndo_set_features = tg3_set_features,
12359 #ifdef CONFIG_NET_POLL_CONTROLLER
12360 .ndo_poll_controller = tg3_poll_controller,
12364 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12366 u32 cursize, val, magic;
12368 tp->nvram_size = EEPROM_CHIP_SIZE;
12370 if (tg3_nvram_read(tp, 0, &magic) != 0)
12373 if ((magic != TG3_EEPROM_MAGIC) &&
12374 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12375 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12379 * Size the chip by reading offsets at increasing powers of two.
12380 * When we encounter our validation signature, we know the addressing
12381 * has wrapped around, and thus have our chip size.
12385 while (cursize < tp->nvram_size) {
12386 if (tg3_nvram_read(tp, cursize, &val) != 0)
12395 tp->nvram_size = cursize;
12398 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12402 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12405 /* Selfboot format */
12406 if (val != TG3_EEPROM_MAGIC) {
12407 tg3_get_eeprom_size(tp);
12411 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12413 /* This is confusing. We want to operate on the
12414 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12415 * call will read from NVRAM and byteswap the data
12416 * according to the byteswapping settings for all
12417 * other register accesses. This ensures the data we
12418 * want will always reside in the lower 16-bits.
12419 * However, the data in NVRAM is in LE format, which
12420 * means the data from the NVRAM read will always be
12421 * opposite the endianness of the CPU. The 16-bit
12422 * byteswap then brings the data to CPU endianness.
12424 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12428 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12431 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12435 nvcfg1 = tr32(NVRAM_CFG1);
12436 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12437 tg3_flag_set(tp, FLASH);
12439 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12440 tw32(NVRAM_CFG1, nvcfg1);
12443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12444 tg3_flag(tp, 5780_CLASS)) {
12445 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12446 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12447 tp->nvram_jedecnum = JEDEC_ATMEL;
12448 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12449 tg3_flag_set(tp, NVRAM_BUFFERED);
12451 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12452 tp->nvram_jedecnum = JEDEC_ATMEL;
12453 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12455 case FLASH_VENDOR_ATMEL_EEPROM:
12456 tp->nvram_jedecnum = JEDEC_ATMEL;
12457 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12458 tg3_flag_set(tp, NVRAM_BUFFERED);
12460 case FLASH_VENDOR_ST:
12461 tp->nvram_jedecnum = JEDEC_ST;
12462 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12463 tg3_flag_set(tp, NVRAM_BUFFERED);
12465 case FLASH_VENDOR_SAIFUN:
12466 tp->nvram_jedecnum = JEDEC_SAIFUN;
12467 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12469 case FLASH_VENDOR_SST_SMALL:
12470 case FLASH_VENDOR_SST_LARGE:
12471 tp->nvram_jedecnum = JEDEC_SST;
12472 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12476 tp->nvram_jedecnum = JEDEC_ATMEL;
12477 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12478 tg3_flag_set(tp, NVRAM_BUFFERED);
12482 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12484 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12485 case FLASH_5752PAGE_SIZE_256:
12486 tp->nvram_pagesize = 256;
12488 case FLASH_5752PAGE_SIZE_512:
12489 tp->nvram_pagesize = 512;
12491 case FLASH_5752PAGE_SIZE_1K:
12492 tp->nvram_pagesize = 1024;
12494 case FLASH_5752PAGE_SIZE_2K:
12495 tp->nvram_pagesize = 2048;
12497 case FLASH_5752PAGE_SIZE_4K:
12498 tp->nvram_pagesize = 4096;
12500 case FLASH_5752PAGE_SIZE_264:
12501 tp->nvram_pagesize = 264;
12503 case FLASH_5752PAGE_SIZE_528:
12504 tp->nvram_pagesize = 528;
12509 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12513 nvcfg1 = tr32(NVRAM_CFG1);
12515 /* NVRAM protection for TPM */
12516 if (nvcfg1 & (1 << 27))
12517 tg3_flag_set(tp, PROTECTED_NVRAM);
12519 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12520 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12521 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12522 tp->nvram_jedecnum = JEDEC_ATMEL;
12523 tg3_flag_set(tp, NVRAM_BUFFERED);
12525 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12526 tp->nvram_jedecnum = JEDEC_ATMEL;
12527 tg3_flag_set(tp, NVRAM_BUFFERED);
12528 tg3_flag_set(tp, FLASH);
12530 case FLASH_5752VENDOR_ST_M45PE10:
12531 case FLASH_5752VENDOR_ST_M45PE20:
12532 case FLASH_5752VENDOR_ST_M45PE40:
12533 tp->nvram_jedecnum = JEDEC_ST;
12534 tg3_flag_set(tp, NVRAM_BUFFERED);
12535 tg3_flag_set(tp, FLASH);
12539 if (tg3_flag(tp, FLASH)) {
12540 tg3_nvram_get_pagesize(tp, nvcfg1);
12542 /* For eeprom, set pagesize to maximum eeprom size */
12543 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12545 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12546 tw32(NVRAM_CFG1, nvcfg1);
12550 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12552 u32 nvcfg1, protect = 0;
12554 nvcfg1 = tr32(NVRAM_CFG1);
12556 /* NVRAM protection for TPM */
12557 if (nvcfg1 & (1 << 27)) {
12558 tg3_flag_set(tp, PROTECTED_NVRAM);
12562 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12564 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12565 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12566 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12567 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12568 tp->nvram_jedecnum = JEDEC_ATMEL;
12569 tg3_flag_set(tp, NVRAM_BUFFERED);
12570 tg3_flag_set(tp, FLASH);
12571 tp->nvram_pagesize = 264;
12572 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12573 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12574 tp->nvram_size = (protect ? 0x3e200 :
12575 TG3_NVRAM_SIZE_512KB);
12576 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12577 tp->nvram_size = (protect ? 0x1f200 :
12578 TG3_NVRAM_SIZE_256KB);
12580 tp->nvram_size = (protect ? 0x1f200 :
12581 TG3_NVRAM_SIZE_128KB);
12583 case FLASH_5752VENDOR_ST_M45PE10:
12584 case FLASH_5752VENDOR_ST_M45PE20:
12585 case FLASH_5752VENDOR_ST_M45PE40:
12586 tp->nvram_jedecnum = JEDEC_ST;
12587 tg3_flag_set(tp, NVRAM_BUFFERED);
12588 tg3_flag_set(tp, FLASH);
12589 tp->nvram_pagesize = 256;
12590 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12591 tp->nvram_size = (protect ?
12592 TG3_NVRAM_SIZE_64KB :
12593 TG3_NVRAM_SIZE_128KB);
12594 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12595 tp->nvram_size = (protect ?
12596 TG3_NVRAM_SIZE_64KB :
12597 TG3_NVRAM_SIZE_256KB);
12599 tp->nvram_size = (protect ?
12600 TG3_NVRAM_SIZE_128KB :
12601 TG3_NVRAM_SIZE_512KB);
12606 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12610 nvcfg1 = tr32(NVRAM_CFG1);
12612 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12613 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12614 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12615 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12616 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12617 tp->nvram_jedecnum = JEDEC_ATMEL;
12618 tg3_flag_set(tp, NVRAM_BUFFERED);
12619 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12621 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12622 tw32(NVRAM_CFG1, nvcfg1);
12624 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12625 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12626 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12627 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12628 tp->nvram_jedecnum = JEDEC_ATMEL;
12629 tg3_flag_set(tp, NVRAM_BUFFERED);
12630 tg3_flag_set(tp, FLASH);
12631 tp->nvram_pagesize = 264;
12633 case FLASH_5752VENDOR_ST_M45PE10:
12634 case FLASH_5752VENDOR_ST_M45PE20:
12635 case FLASH_5752VENDOR_ST_M45PE40:
12636 tp->nvram_jedecnum = JEDEC_ST;
12637 tg3_flag_set(tp, NVRAM_BUFFERED);
12638 tg3_flag_set(tp, FLASH);
12639 tp->nvram_pagesize = 256;
12644 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12646 u32 nvcfg1, protect = 0;
12648 nvcfg1 = tr32(NVRAM_CFG1);
12650 /* NVRAM protection for TPM */
12651 if (nvcfg1 & (1 << 27)) {
12652 tg3_flag_set(tp, PROTECTED_NVRAM);
12656 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12658 case FLASH_5761VENDOR_ATMEL_ADB021D:
12659 case FLASH_5761VENDOR_ATMEL_ADB041D:
12660 case FLASH_5761VENDOR_ATMEL_ADB081D:
12661 case FLASH_5761VENDOR_ATMEL_ADB161D:
12662 case FLASH_5761VENDOR_ATMEL_MDB021D:
12663 case FLASH_5761VENDOR_ATMEL_MDB041D:
12664 case FLASH_5761VENDOR_ATMEL_MDB081D:
12665 case FLASH_5761VENDOR_ATMEL_MDB161D:
12666 tp->nvram_jedecnum = JEDEC_ATMEL;
12667 tg3_flag_set(tp, NVRAM_BUFFERED);
12668 tg3_flag_set(tp, FLASH);
12669 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12670 tp->nvram_pagesize = 256;
12672 case FLASH_5761VENDOR_ST_A_M45PE20:
12673 case FLASH_5761VENDOR_ST_A_M45PE40:
12674 case FLASH_5761VENDOR_ST_A_M45PE80:
12675 case FLASH_5761VENDOR_ST_A_M45PE16:
12676 case FLASH_5761VENDOR_ST_M_M45PE20:
12677 case FLASH_5761VENDOR_ST_M_M45PE40:
12678 case FLASH_5761VENDOR_ST_M_M45PE80:
12679 case FLASH_5761VENDOR_ST_M_M45PE16:
12680 tp->nvram_jedecnum = JEDEC_ST;
12681 tg3_flag_set(tp, NVRAM_BUFFERED);
12682 tg3_flag_set(tp, FLASH);
12683 tp->nvram_pagesize = 256;
12688 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12691 case FLASH_5761VENDOR_ATMEL_ADB161D:
12692 case FLASH_5761VENDOR_ATMEL_MDB161D:
12693 case FLASH_5761VENDOR_ST_A_M45PE16:
12694 case FLASH_5761VENDOR_ST_M_M45PE16:
12695 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12697 case FLASH_5761VENDOR_ATMEL_ADB081D:
12698 case FLASH_5761VENDOR_ATMEL_MDB081D:
12699 case FLASH_5761VENDOR_ST_A_M45PE80:
12700 case FLASH_5761VENDOR_ST_M_M45PE80:
12701 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12703 case FLASH_5761VENDOR_ATMEL_ADB041D:
12704 case FLASH_5761VENDOR_ATMEL_MDB041D:
12705 case FLASH_5761VENDOR_ST_A_M45PE40:
12706 case FLASH_5761VENDOR_ST_M_M45PE40:
12707 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12709 case FLASH_5761VENDOR_ATMEL_ADB021D:
12710 case FLASH_5761VENDOR_ATMEL_MDB021D:
12711 case FLASH_5761VENDOR_ST_A_M45PE20:
12712 case FLASH_5761VENDOR_ST_M_M45PE20:
12713 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12719 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12721 tp->nvram_jedecnum = JEDEC_ATMEL;
12722 tg3_flag_set(tp, NVRAM_BUFFERED);
12723 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12726 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12730 nvcfg1 = tr32(NVRAM_CFG1);
12732 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12733 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12734 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12735 tp->nvram_jedecnum = JEDEC_ATMEL;
12736 tg3_flag_set(tp, NVRAM_BUFFERED);
12737 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12739 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12740 tw32(NVRAM_CFG1, nvcfg1);
12742 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12743 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12744 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12745 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12746 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12747 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12748 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12749 tp->nvram_jedecnum = JEDEC_ATMEL;
12750 tg3_flag_set(tp, NVRAM_BUFFERED);
12751 tg3_flag_set(tp, FLASH);
12753 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12754 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12755 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12756 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12757 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12759 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12760 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12761 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12763 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12764 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12765 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12769 case FLASH_5752VENDOR_ST_M45PE10:
12770 case FLASH_5752VENDOR_ST_M45PE20:
12771 case FLASH_5752VENDOR_ST_M45PE40:
12772 tp->nvram_jedecnum = JEDEC_ST;
12773 tg3_flag_set(tp, NVRAM_BUFFERED);
12774 tg3_flag_set(tp, FLASH);
12776 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12777 case FLASH_5752VENDOR_ST_M45PE10:
12778 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12780 case FLASH_5752VENDOR_ST_M45PE20:
12781 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12783 case FLASH_5752VENDOR_ST_M45PE40:
12784 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12789 tg3_flag_set(tp, NO_NVRAM);
12793 tg3_nvram_get_pagesize(tp, nvcfg1);
12794 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12795 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12799 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12803 nvcfg1 = tr32(NVRAM_CFG1);
12805 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12806 case FLASH_5717VENDOR_ATMEL_EEPROM:
12807 case FLASH_5717VENDOR_MICRO_EEPROM:
12808 tp->nvram_jedecnum = JEDEC_ATMEL;
12809 tg3_flag_set(tp, NVRAM_BUFFERED);
12810 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12812 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12813 tw32(NVRAM_CFG1, nvcfg1);
12815 case FLASH_5717VENDOR_ATMEL_MDB011D:
12816 case FLASH_5717VENDOR_ATMEL_ADB011B:
12817 case FLASH_5717VENDOR_ATMEL_ADB011D:
12818 case FLASH_5717VENDOR_ATMEL_MDB021D:
12819 case FLASH_5717VENDOR_ATMEL_ADB021B:
12820 case FLASH_5717VENDOR_ATMEL_ADB021D:
12821 case FLASH_5717VENDOR_ATMEL_45USPT:
12822 tp->nvram_jedecnum = JEDEC_ATMEL;
12823 tg3_flag_set(tp, NVRAM_BUFFERED);
12824 tg3_flag_set(tp, FLASH);
12826 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12827 case FLASH_5717VENDOR_ATMEL_MDB021D:
12828 /* Detect size with tg3_nvram_get_size() */
12830 case FLASH_5717VENDOR_ATMEL_ADB021B:
12831 case FLASH_5717VENDOR_ATMEL_ADB021D:
12832 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12835 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12839 case FLASH_5717VENDOR_ST_M_M25PE10:
12840 case FLASH_5717VENDOR_ST_A_M25PE10:
12841 case FLASH_5717VENDOR_ST_M_M45PE10:
12842 case FLASH_5717VENDOR_ST_A_M45PE10:
12843 case FLASH_5717VENDOR_ST_M_M25PE20:
12844 case FLASH_5717VENDOR_ST_A_M25PE20:
12845 case FLASH_5717VENDOR_ST_M_M45PE20:
12846 case FLASH_5717VENDOR_ST_A_M45PE20:
12847 case FLASH_5717VENDOR_ST_25USPT:
12848 case FLASH_5717VENDOR_ST_45USPT:
12849 tp->nvram_jedecnum = JEDEC_ST;
12850 tg3_flag_set(tp, NVRAM_BUFFERED);
12851 tg3_flag_set(tp, FLASH);
12853 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12854 case FLASH_5717VENDOR_ST_M_M25PE20:
12855 case FLASH_5717VENDOR_ST_M_M45PE20:
12856 /* Detect size with tg3_nvram_get_size() */
12858 case FLASH_5717VENDOR_ST_A_M25PE20:
12859 case FLASH_5717VENDOR_ST_A_M45PE20:
12860 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12863 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12868 tg3_flag_set(tp, NO_NVRAM);
12872 tg3_nvram_get_pagesize(tp, nvcfg1);
12873 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12874 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12877 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12879 u32 nvcfg1, nvmpinstrp;
12881 nvcfg1 = tr32(NVRAM_CFG1);
12882 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12884 switch (nvmpinstrp) {
12885 case FLASH_5720_EEPROM_HD:
12886 case FLASH_5720_EEPROM_LD:
12887 tp->nvram_jedecnum = JEDEC_ATMEL;
12888 tg3_flag_set(tp, NVRAM_BUFFERED);
12890 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12891 tw32(NVRAM_CFG1, nvcfg1);
12892 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12893 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12895 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12897 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12898 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12899 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12900 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12901 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12902 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12903 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12904 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12905 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12906 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12907 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12908 case FLASH_5720VENDOR_ATMEL_45USPT:
12909 tp->nvram_jedecnum = JEDEC_ATMEL;
12910 tg3_flag_set(tp, NVRAM_BUFFERED);
12911 tg3_flag_set(tp, FLASH);
12913 switch (nvmpinstrp) {
12914 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12915 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12916 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12917 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12919 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12920 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12921 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12922 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12924 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12925 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12926 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12929 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12933 case FLASH_5720VENDOR_M_ST_M25PE10:
12934 case FLASH_5720VENDOR_M_ST_M45PE10:
12935 case FLASH_5720VENDOR_A_ST_M25PE10:
12936 case FLASH_5720VENDOR_A_ST_M45PE10:
12937 case FLASH_5720VENDOR_M_ST_M25PE20:
12938 case FLASH_5720VENDOR_M_ST_M45PE20:
12939 case FLASH_5720VENDOR_A_ST_M25PE20:
12940 case FLASH_5720VENDOR_A_ST_M45PE20:
12941 case FLASH_5720VENDOR_M_ST_M25PE40:
12942 case FLASH_5720VENDOR_M_ST_M45PE40:
12943 case FLASH_5720VENDOR_A_ST_M25PE40:
12944 case FLASH_5720VENDOR_A_ST_M45PE40:
12945 case FLASH_5720VENDOR_M_ST_M25PE80:
12946 case FLASH_5720VENDOR_M_ST_M45PE80:
12947 case FLASH_5720VENDOR_A_ST_M25PE80:
12948 case FLASH_5720VENDOR_A_ST_M45PE80:
12949 case FLASH_5720VENDOR_ST_25USPT:
12950 case FLASH_5720VENDOR_ST_45USPT:
12951 tp->nvram_jedecnum = JEDEC_ST;
12952 tg3_flag_set(tp, NVRAM_BUFFERED);
12953 tg3_flag_set(tp, FLASH);
12955 switch (nvmpinstrp) {
12956 case FLASH_5720VENDOR_M_ST_M25PE20:
12957 case FLASH_5720VENDOR_M_ST_M45PE20:
12958 case FLASH_5720VENDOR_A_ST_M25PE20:
12959 case FLASH_5720VENDOR_A_ST_M45PE20:
12960 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12962 case FLASH_5720VENDOR_M_ST_M25PE40:
12963 case FLASH_5720VENDOR_M_ST_M45PE40:
12964 case FLASH_5720VENDOR_A_ST_M25PE40:
12965 case FLASH_5720VENDOR_A_ST_M45PE40:
12966 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12968 case FLASH_5720VENDOR_M_ST_M25PE80:
12969 case FLASH_5720VENDOR_M_ST_M45PE80:
12970 case FLASH_5720VENDOR_A_ST_M25PE80:
12971 case FLASH_5720VENDOR_A_ST_M45PE80:
12972 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12975 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12980 tg3_flag_set(tp, NO_NVRAM);
12984 tg3_nvram_get_pagesize(tp, nvcfg1);
12985 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12986 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12989 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12990 static void __devinit tg3_nvram_init(struct tg3 *tp)
12992 tw32_f(GRC_EEPROM_ADDR,
12993 (EEPROM_ADDR_FSM_RESET |
12994 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12995 EEPROM_ADDR_CLKPERD_SHIFT)));
12999 /* Enable seeprom accesses. */
13000 tw32_f(GRC_LOCAL_CTRL,
13001 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13004 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13005 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13006 tg3_flag_set(tp, NVRAM);
13008 if (tg3_nvram_lock(tp)) {
13009 netdev_warn(tp->dev,
13010 "Cannot get nvram lock, %s failed\n",
13014 tg3_enable_nvram_access(tp);
13016 tp->nvram_size = 0;
13018 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13019 tg3_get_5752_nvram_info(tp);
13020 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13021 tg3_get_5755_nvram_info(tp);
13022 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13023 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13024 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13025 tg3_get_5787_nvram_info(tp);
13026 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13027 tg3_get_5761_nvram_info(tp);
13028 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13029 tg3_get_5906_nvram_info(tp);
13030 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13031 tg3_flag(tp, 57765_CLASS))
13032 tg3_get_57780_nvram_info(tp);
13033 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13034 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13035 tg3_get_5717_nvram_info(tp);
13036 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13037 tg3_get_5720_nvram_info(tp);
13039 tg3_get_nvram_info(tp);
13041 if (tp->nvram_size == 0)
13042 tg3_get_nvram_size(tp);
13044 tg3_disable_nvram_access(tp);
13045 tg3_nvram_unlock(tp);
13048 tg3_flag_clear(tp, NVRAM);
13049 tg3_flag_clear(tp, NVRAM_BUFFERED);
13051 tg3_get_eeprom_size(tp);
13055 struct subsys_tbl_ent {
13056 u16 subsys_vendor, subsys_devid;
13060 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13061 /* Broadcom boards. */
13062 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13063 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13064 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13065 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13066 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13067 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13068 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13069 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13070 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13071 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13072 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13073 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13074 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13075 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13076 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13077 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13078 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13079 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13080 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13081 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13082 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13083 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13086 { TG3PCI_SUBVENDOR_ID_3COM,
13087 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13088 { TG3PCI_SUBVENDOR_ID_3COM,
13089 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13090 { TG3PCI_SUBVENDOR_ID_3COM,
13091 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13092 { TG3PCI_SUBVENDOR_ID_3COM,
13093 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13094 { TG3PCI_SUBVENDOR_ID_3COM,
13095 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13098 { TG3PCI_SUBVENDOR_ID_DELL,
13099 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13100 { TG3PCI_SUBVENDOR_ID_DELL,
13101 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13102 { TG3PCI_SUBVENDOR_ID_DELL,
13103 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13104 { TG3PCI_SUBVENDOR_ID_DELL,
13105 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13107 /* Compaq boards. */
13108 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13109 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13110 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13111 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13112 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13113 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13114 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13115 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13116 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13117 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13120 { TG3PCI_SUBVENDOR_ID_IBM,
13121 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13124 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13128 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13129 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13130 tp->pdev->subsystem_vendor) &&
13131 (subsys_id_to_phy_id[i].subsys_devid ==
13132 tp->pdev->subsystem_device))
13133 return &subsys_id_to_phy_id[i];
13138 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13142 tp->phy_id = TG3_PHY_ID_INVALID;
13143 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13145 /* Assume an onboard device and WOL capable by default. */
13146 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13147 tg3_flag_set(tp, WOL_CAP);
13149 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13150 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13151 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13152 tg3_flag_set(tp, IS_NIC);
13154 val = tr32(VCPU_CFGSHDW);
13155 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13156 tg3_flag_set(tp, ASPM_WORKAROUND);
13157 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13158 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13159 tg3_flag_set(tp, WOL_ENABLE);
13160 device_set_wakeup_enable(&tp->pdev->dev, true);
13165 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13166 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13167 u32 nic_cfg, led_cfg;
13168 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13169 int eeprom_phy_serdes = 0;
13171 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13172 tp->nic_sram_data_cfg = nic_cfg;
13174 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13175 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13176 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13177 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13178 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13179 (ver > 0) && (ver < 0x100))
13180 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13182 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13183 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13185 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13186 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13187 eeprom_phy_serdes = 1;
13189 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13190 if (nic_phy_id != 0) {
13191 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13192 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13194 eeprom_phy_id = (id1 >> 16) << 10;
13195 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13196 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13200 tp->phy_id = eeprom_phy_id;
13201 if (eeprom_phy_serdes) {
13202 if (!tg3_flag(tp, 5705_PLUS))
13203 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13205 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13208 if (tg3_flag(tp, 5750_PLUS))
13209 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13210 SHASTA_EXT_LED_MODE_MASK);
13212 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13216 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13217 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13220 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13221 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13224 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13225 tp->led_ctrl = LED_CTRL_MODE_MAC;
13227 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13228 * read on some older 5700/5701 bootcode.
13230 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13232 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13234 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13238 case SHASTA_EXT_LED_SHARED:
13239 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13240 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13241 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13242 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13243 LED_CTRL_MODE_PHY_2);
13246 case SHASTA_EXT_LED_MAC:
13247 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13250 case SHASTA_EXT_LED_COMBO:
13251 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13252 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13253 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13254 LED_CTRL_MODE_PHY_2);
13259 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13260 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13261 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13262 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13264 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13265 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13267 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13268 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13269 if ((tp->pdev->subsystem_vendor ==
13270 PCI_VENDOR_ID_ARIMA) &&
13271 (tp->pdev->subsystem_device == 0x205a ||
13272 tp->pdev->subsystem_device == 0x2063))
13273 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13275 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13276 tg3_flag_set(tp, IS_NIC);
13279 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13280 tg3_flag_set(tp, ENABLE_ASF);
13281 if (tg3_flag(tp, 5750_PLUS))
13282 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13285 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13286 tg3_flag(tp, 5750_PLUS))
13287 tg3_flag_set(tp, ENABLE_APE);
13289 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13290 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13291 tg3_flag_clear(tp, WOL_CAP);
13293 if (tg3_flag(tp, WOL_CAP) &&
13294 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13295 tg3_flag_set(tp, WOL_ENABLE);
13296 device_set_wakeup_enable(&tp->pdev->dev, true);
13299 if (cfg2 & (1 << 17))
13300 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13302 /* serdes signal pre-emphasis in register 0x590 set by */
13303 /* bootcode if bit 18 is set */
13304 if (cfg2 & (1 << 18))
13305 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13307 if ((tg3_flag(tp, 57765_PLUS) ||
13308 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13309 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13310 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13311 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13313 if (tg3_flag(tp, PCI_EXPRESS) &&
13314 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13315 !tg3_flag(tp, 57765_PLUS)) {
13318 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13319 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13320 tg3_flag_set(tp, ASPM_WORKAROUND);
13323 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13324 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13325 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13326 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13327 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13328 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13331 if (tg3_flag(tp, WOL_CAP))
13332 device_set_wakeup_enable(&tp->pdev->dev,
13333 tg3_flag(tp, WOL_ENABLE));
13335 device_set_wakeup_capable(&tp->pdev->dev, false);
13338 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13343 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13344 tw32(OTP_CTRL, cmd);
13346 /* Wait for up to 1 ms for command to execute. */
13347 for (i = 0; i < 100; i++) {
13348 val = tr32(OTP_STATUS);
13349 if (val & OTP_STATUS_CMD_DONE)
13354 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13357 /* Read the gphy configuration from the OTP region of the chip. The gphy
13358 * configuration is a 32-bit value that straddles the alignment boundary.
13359 * We do two 32-bit reads and then shift and merge the results.
13361 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13363 u32 bhalf_otp, thalf_otp;
13365 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13367 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13370 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13372 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13375 thalf_otp = tr32(OTP_READ_DATA);
13377 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13379 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13382 bhalf_otp = tr32(OTP_READ_DATA);
13384 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13387 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13389 u32 adv = ADVERTISED_Autoneg;
13391 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13392 adv |= ADVERTISED_1000baseT_Half |
13393 ADVERTISED_1000baseT_Full;
13395 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13396 adv |= ADVERTISED_100baseT_Half |
13397 ADVERTISED_100baseT_Full |
13398 ADVERTISED_10baseT_Half |
13399 ADVERTISED_10baseT_Full |
13402 adv |= ADVERTISED_FIBRE;
13404 tp->link_config.advertising = adv;
13405 tp->link_config.speed = SPEED_UNKNOWN;
13406 tp->link_config.duplex = DUPLEX_UNKNOWN;
13407 tp->link_config.autoneg = AUTONEG_ENABLE;
13408 tp->link_config.active_speed = SPEED_UNKNOWN;
13409 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13414 static int __devinit tg3_phy_probe(struct tg3 *tp)
13416 u32 hw_phy_id_1, hw_phy_id_2;
13417 u32 hw_phy_id, hw_phy_id_masked;
13420 /* flow control autonegotiation is default behavior */
13421 tg3_flag_set(tp, PAUSE_AUTONEG);
13422 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13424 if (tg3_flag(tp, USE_PHYLIB))
13425 return tg3_phy_init(tp);
13427 /* Reading the PHY ID register can conflict with ASF
13428 * firmware access to the PHY hardware.
13431 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13432 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13434 /* Now read the physical PHY_ID from the chip and verify
13435 * that it is sane. If it doesn't look good, we fall back
13436 * to either the hard-coded table based PHY_ID and failing
13437 * that the value found in the eeprom area.
13439 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13440 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13442 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13443 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13444 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13446 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13449 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13450 tp->phy_id = hw_phy_id;
13451 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13452 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13454 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13456 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13457 /* Do nothing, phy ID already set up in
13458 * tg3_get_eeprom_hw_cfg().
13461 struct subsys_tbl_ent *p;
13463 /* No eeprom signature? Try the hardcoded
13464 * subsys device table.
13466 p = tg3_lookup_by_subsys(tp);
13470 tp->phy_id = p->phy_id;
13472 tp->phy_id == TG3_PHY_ID_BCM8002)
13473 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13477 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13478 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13479 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13480 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13481 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13482 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13483 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13484 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13486 tg3_phy_init_link_config(tp);
13488 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13489 !tg3_flag(tp, ENABLE_APE) &&
13490 !tg3_flag(tp, ENABLE_ASF)) {
13493 tg3_readphy(tp, MII_BMSR, &bmsr);
13494 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13495 (bmsr & BMSR_LSTATUS))
13496 goto skip_phy_reset;
13498 err = tg3_phy_reset(tp);
13502 tg3_phy_set_wirespeed(tp);
13504 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13505 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13506 tp->link_config.flowctrl);
13508 tg3_writephy(tp, MII_BMCR,
13509 BMCR_ANENABLE | BMCR_ANRESTART);
13514 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13515 err = tg3_init_5401phy_dsp(tp);
13519 err = tg3_init_5401phy_dsp(tp);
13525 static void __devinit tg3_read_vpd(struct tg3 *tp)
13528 unsigned int block_end, rosize, len;
13532 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13536 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13538 goto out_not_found;
13540 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13541 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13542 i += PCI_VPD_LRDT_TAG_SIZE;
13544 if (block_end > vpdlen)
13545 goto out_not_found;
13547 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13548 PCI_VPD_RO_KEYWORD_MFR_ID);
13550 len = pci_vpd_info_field_size(&vpd_data[j]);
13552 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13553 if (j + len > block_end || len != 4 ||
13554 memcmp(&vpd_data[j], "1028", 4))
13557 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13558 PCI_VPD_RO_KEYWORD_VENDOR0);
13562 len = pci_vpd_info_field_size(&vpd_data[j]);
13564 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13565 if (j + len > block_end)
13568 memcpy(tp->fw_ver, &vpd_data[j], len);
13569 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13573 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13574 PCI_VPD_RO_KEYWORD_PARTNO);
13576 goto out_not_found;
13578 len = pci_vpd_info_field_size(&vpd_data[i]);
13580 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13581 if (len > TG3_BPN_SIZE ||
13582 (len + i) > vpdlen)
13583 goto out_not_found;
13585 memcpy(tp->board_part_number, &vpd_data[i], len);
13589 if (tp->board_part_number[0])
13593 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13594 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13595 strcpy(tp->board_part_number, "BCM5717");
13596 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13597 strcpy(tp->board_part_number, "BCM5718");
13600 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13601 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13602 strcpy(tp->board_part_number, "BCM57780");
13603 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13604 strcpy(tp->board_part_number, "BCM57760");
13605 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13606 strcpy(tp->board_part_number, "BCM57790");
13607 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13608 strcpy(tp->board_part_number, "BCM57788");
13611 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13612 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13613 strcpy(tp->board_part_number, "BCM57761");
13614 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13615 strcpy(tp->board_part_number, "BCM57765");
13616 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13617 strcpy(tp->board_part_number, "BCM57781");
13618 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13619 strcpy(tp->board_part_number, "BCM57785");
13620 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13621 strcpy(tp->board_part_number, "BCM57791");
13622 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13623 strcpy(tp->board_part_number, "BCM57795");
13626 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13627 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13628 strcpy(tp->board_part_number, "BCM57762");
13629 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13630 strcpy(tp->board_part_number, "BCM57766");
13631 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13632 strcpy(tp->board_part_number, "BCM57782");
13633 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13634 strcpy(tp->board_part_number, "BCM57786");
13637 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13638 strcpy(tp->board_part_number, "BCM95906");
13641 strcpy(tp->board_part_number, "none");
13645 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13649 if (tg3_nvram_read(tp, offset, &val) ||
13650 (val & 0xfc000000) != 0x0c000000 ||
13651 tg3_nvram_read(tp, offset + 4, &val) ||
13658 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13660 u32 val, offset, start, ver_offset;
13662 bool newver = false;
13664 if (tg3_nvram_read(tp, 0xc, &offset) ||
13665 tg3_nvram_read(tp, 0x4, &start))
13668 offset = tg3_nvram_logical_addr(tp, offset);
13670 if (tg3_nvram_read(tp, offset, &val))
13673 if ((val & 0xfc000000) == 0x0c000000) {
13674 if (tg3_nvram_read(tp, offset + 4, &val))
13681 dst_off = strlen(tp->fw_ver);
13684 if (TG3_VER_SIZE - dst_off < 16 ||
13685 tg3_nvram_read(tp, offset + 8, &ver_offset))
13688 offset = offset + ver_offset - start;
13689 for (i = 0; i < 16; i += 4) {
13691 if (tg3_nvram_read_be32(tp, offset + i, &v))
13694 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13699 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13702 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13703 TG3_NVM_BCVER_MAJSFT;
13704 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13705 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13706 "v%d.%02d", major, minor);
13710 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13712 u32 val, major, minor;
13714 /* Use native endian representation */
13715 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13718 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13719 TG3_NVM_HWSB_CFG1_MAJSFT;
13720 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13721 TG3_NVM_HWSB_CFG1_MINSFT;
13723 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13726 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13728 u32 offset, major, minor, build;
13730 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13732 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13735 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13736 case TG3_EEPROM_SB_REVISION_0:
13737 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13739 case TG3_EEPROM_SB_REVISION_2:
13740 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13742 case TG3_EEPROM_SB_REVISION_3:
13743 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13745 case TG3_EEPROM_SB_REVISION_4:
13746 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13748 case TG3_EEPROM_SB_REVISION_5:
13749 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13751 case TG3_EEPROM_SB_REVISION_6:
13752 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13758 if (tg3_nvram_read(tp, offset, &val))
13761 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13762 TG3_EEPROM_SB_EDH_BLD_SHFT;
13763 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13764 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13765 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13767 if (minor > 99 || build > 26)
13770 offset = strlen(tp->fw_ver);
13771 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13772 " v%d.%02d", major, minor);
13775 offset = strlen(tp->fw_ver);
13776 if (offset < TG3_VER_SIZE - 1)
13777 tp->fw_ver[offset] = 'a' + build - 1;
13781 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13783 u32 val, offset, start;
13786 for (offset = TG3_NVM_DIR_START;
13787 offset < TG3_NVM_DIR_END;
13788 offset += TG3_NVM_DIRENT_SIZE) {
13789 if (tg3_nvram_read(tp, offset, &val))
13792 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13796 if (offset == TG3_NVM_DIR_END)
13799 if (!tg3_flag(tp, 5705_PLUS))
13800 start = 0x08000000;
13801 else if (tg3_nvram_read(tp, offset - 4, &start))
13804 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13805 !tg3_fw_img_is_valid(tp, offset) ||
13806 tg3_nvram_read(tp, offset + 8, &val))
13809 offset += val - start;
13811 vlen = strlen(tp->fw_ver);
13813 tp->fw_ver[vlen++] = ',';
13814 tp->fw_ver[vlen++] = ' ';
13816 for (i = 0; i < 4; i++) {
13818 if (tg3_nvram_read_be32(tp, offset, &v))
13821 offset += sizeof(v);
13823 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13824 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13828 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13833 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13839 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13842 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13843 if (apedata != APE_SEG_SIG_MAGIC)
13846 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13847 if (!(apedata & APE_FW_STATUS_READY))
13850 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13852 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13853 tg3_flag_set(tp, APE_HAS_NCSI);
13859 vlen = strlen(tp->fw_ver);
13861 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13863 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13864 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13865 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13866 (apedata & APE_FW_VERSION_BLDMSK));
13869 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13872 bool vpd_vers = false;
13874 if (tp->fw_ver[0] != 0)
13877 if (tg3_flag(tp, NO_NVRAM)) {
13878 strcat(tp->fw_ver, "sb");
13882 if (tg3_nvram_read(tp, 0, &val))
13885 if (val == TG3_EEPROM_MAGIC)
13886 tg3_read_bc_ver(tp);
13887 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13888 tg3_read_sb_ver(tp, val);
13889 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13890 tg3_read_hwsb_ver(tp);
13897 if (tg3_flag(tp, ENABLE_APE)) {
13898 if (tg3_flag(tp, ENABLE_ASF))
13899 tg3_read_dash_ver(tp);
13900 } else if (tg3_flag(tp, ENABLE_ASF)) {
13901 tg3_read_mgmtfw_ver(tp);
13905 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13908 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13910 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13911 return TG3_RX_RET_MAX_SIZE_5717;
13912 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13913 return TG3_RX_RET_MAX_SIZE_5700;
13915 return TG3_RX_RET_MAX_SIZE_5705;
13918 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13919 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13920 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13921 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13925 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13927 struct pci_dev *peer;
13928 unsigned int func, devnr = tp->pdev->devfn & ~7;
13930 for (func = 0; func < 8; func++) {
13931 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13932 if (peer && peer != tp->pdev)
13936 /* 5704 can be configured in single-port mode, set peer to
13937 * tp->pdev in that case.
13945 * We don't need to keep the refcount elevated; there's no way
13946 * to remove one half of this device without removing the other
13953 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13955 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13956 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13959 /* All devices that use the alternate
13960 * ASIC REV location have a CPMU.
13962 tg3_flag_set(tp, CPMU_PRESENT);
13964 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13965 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13966 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13967 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13968 reg = TG3PCI_GEN2_PRODID_ASICREV;
13969 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13970 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13971 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13972 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13973 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13974 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13975 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13976 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13977 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13978 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13979 reg = TG3PCI_GEN15_PRODID_ASICREV;
13981 reg = TG3PCI_PRODID_ASICREV;
13983 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
13986 /* Wrong chip ID in 5752 A0. This code can be removed later
13987 * as A0 is not in production.
13989 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13990 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13992 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13993 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13994 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13995 tg3_flag_set(tp, 5717_PLUS);
13997 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13998 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13999 tg3_flag_set(tp, 57765_CLASS);
14001 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14002 tg3_flag_set(tp, 57765_PLUS);
14004 /* Intentionally exclude ASIC_REV_5906 */
14005 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14006 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14007 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14008 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14009 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14011 tg3_flag(tp, 57765_PLUS))
14012 tg3_flag_set(tp, 5755_PLUS);
14014 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14016 tg3_flag_set(tp, 5780_CLASS);
14018 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14019 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14020 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14021 tg3_flag(tp, 5755_PLUS) ||
14022 tg3_flag(tp, 5780_CLASS))
14023 tg3_flag_set(tp, 5750_PLUS);
14025 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14026 tg3_flag(tp, 5750_PLUS))
14027 tg3_flag_set(tp, 5705_PLUS);
14030 static int __devinit tg3_get_invariants(struct tg3 *tp)
14033 u32 pci_state_reg, grc_misc_cfg;
14038 /* Force memory write invalidate off. If we leave it on,
14039 * then on 5700_BX chips we have to enable a workaround.
14040 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14041 * to match the cacheline size. The Broadcom driver have this
14042 * workaround but turns MWI off all the times so never uses
14043 * it. This seems to suggest that the workaround is insufficient.
14045 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14046 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14047 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14049 /* Important! -- Make sure register accesses are byteswapped
14050 * correctly. Also, for those chips that require it, make
14051 * sure that indirect register accesses are enabled before
14052 * the first operation.
14054 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14056 tp->misc_host_ctrl |= (misc_ctrl_reg &
14057 MISC_HOST_CTRL_CHIPREV);
14058 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14059 tp->misc_host_ctrl);
14061 tg3_detect_asic_rev(tp, misc_ctrl_reg);
14063 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14064 * we need to disable memory and use config. cycles
14065 * only to access all registers. The 5702/03 chips
14066 * can mistakenly decode the special cycles from the
14067 * ICH chipsets as memory write cycles, causing corruption
14068 * of register and memory space. Only certain ICH bridges
14069 * will drive special cycles with non-zero data during the
14070 * address phase which can fall within the 5703's address
14071 * range. This is not an ICH bug as the PCI spec allows
14072 * non-zero address during special cycles. However, only
14073 * these ICH bridges are known to drive non-zero addresses
14074 * during special cycles.
14076 * Since special cycles do not cross PCI bridges, we only
14077 * enable this workaround if the 5703 is on the secondary
14078 * bus of these ICH bridges.
14080 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14081 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14082 static struct tg3_dev_id {
14086 } ich_chipsets[] = {
14087 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14089 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14091 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14093 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14097 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14098 struct pci_dev *bridge = NULL;
14100 while (pci_id->vendor != 0) {
14101 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14107 if (pci_id->rev != PCI_ANY_ID) {
14108 if (bridge->revision > pci_id->rev)
14111 if (bridge->subordinate &&
14112 (bridge->subordinate->number ==
14113 tp->pdev->bus->number)) {
14114 tg3_flag_set(tp, ICH_WORKAROUND);
14115 pci_dev_put(bridge);
14121 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14122 static struct tg3_dev_id {
14125 } bridge_chipsets[] = {
14126 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14127 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14130 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14131 struct pci_dev *bridge = NULL;
14133 while (pci_id->vendor != 0) {
14134 bridge = pci_get_device(pci_id->vendor,
14141 if (bridge->subordinate &&
14142 (bridge->subordinate->number <=
14143 tp->pdev->bus->number) &&
14144 (bridge->subordinate->subordinate >=
14145 tp->pdev->bus->number)) {
14146 tg3_flag_set(tp, 5701_DMA_BUG);
14147 pci_dev_put(bridge);
14153 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14154 * DMA addresses > 40-bit. This bridge may have other additional
14155 * 57xx devices behind it in some 4-port NIC designs for example.
14156 * Any tg3 device found behind the bridge will also need the 40-bit
14159 if (tg3_flag(tp, 5780_CLASS)) {
14160 tg3_flag_set(tp, 40BIT_DMA_BUG);
14161 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14163 struct pci_dev *bridge = NULL;
14166 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14167 PCI_DEVICE_ID_SERVERWORKS_EPB,
14169 if (bridge && bridge->subordinate &&
14170 (bridge->subordinate->number <=
14171 tp->pdev->bus->number) &&
14172 (bridge->subordinate->subordinate >=
14173 tp->pdev->bus->number)) {
14174 tg3_flag_set(tp, 40BIT_DMA_BUG);
14175 pci_dev_put(bridge);
14181 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14183 tp->pdev_peer = tg3_find_peer(tp);
14185 /* Determine TSO capabilities */
14186 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14187 ; /* Do nothing. HW bug. */
14188 else if (tg3_flag(tp, 57765_PLUS))
14189 tg3_flag_set(tp, HW_TSO_3);
14190 else if (tg3_flag(tp, 5755_PLUS) ||
14191 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14192 tg3_flag_set(tp, HW_TSO_2);
14193 else if (tg3_flag(tp, 5750_PLUS)) {
14194 tg3_flag_set(tp, HW_TSO_1);
14195 tg3_flag_set(tp, TSO_BUG);
14196 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14197 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14198 tg3_flag_clear(tp, TSO_BUG);
14199 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14200 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14201 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14202 tg3_flag_set(tp, TSO_BUG);
14203 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14204 tp->fw_needed = FIRMWARE_TG3TSO5;
14206 tp->fw_needed = FIRMWARE_TG3TSO;
14209 /* Selectively allow TSO based on operating conditions */
14210 if (tg3_flag(tp, HW_TSO_1) ||
14211 tg3_flag(tp, HW_TSO_2) ||
14212 tg3_flag(tp, HW_TSO_3) ||
14214 /* For firmware TSO, assume ASF is disabled.
14215 * We'll disable TSO later if we discover ASF
14216 * is enabled in tg3_get_eeprom_hw_cfg().
14218 tg3_flag_set(tp, TSO_CAPABLE);
14220 tg3_flag_clear(tp, TSO_CAPABLE);
14221 tg3_flag_clear(tp, TSO_BUG);
14222 tp->fw_needed = NULL;
14225 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14226 tp->fw_needed = FIRMWARE_TG3;
14230 if (tg3_flag(tp, 5750_PLUS)) {
14231 tg3_flag_set(tp, SUPPORT_MSI);
14232 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14233 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14234 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14235 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14236 tp->pdev_peer == tp->pdev))
14237 tg3_flag_clear(tp, SUPPORT_MSI);
14239 if (tg3_flag(tp, 5755_PLUS) ||
14240 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14241 tg3_flag_set(tp, 1SHOT_MSI);
14244 if (tg3_flag(tp, 57765_PLUS)) {
14245 tg3_flag_set(tp, SUPPORT_MSIX);
14246 tp->irq_max = TG3_IRQ_MAX_VECS;
14247 tg3_rss_init_dflt_indir_tbl(tp);
14251 if (tg3_flag(tp, 5755_PLUS))
14252 tg3_flag_set(tp, SHORT_DMA_BUG);
14254 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14255 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14257 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14258 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14259 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14260 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14262 if (tg3_flag(tp, 57765_PLUS) &&
14263 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14264 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14266 if (!tg3_flag(tp, 5705_PLUS) ||
14267 tg3_flag(tp, 5780_CLASS) ||
14268 tg3_flag(tp, USE_JUMBO_BDFLAG))
14269 tg3_flag_set(tp, JUMBO_CAPABLE);
14271 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14274 if (pci_is_pcie(tp->pdev)) {
14277 tg3_flag_set(tp, PCI_EXPRESS);
14279 pci_read_config_word(tp->pdev,
14280 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14282 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14283 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14285 tg3_flag_clear(tp, HW_TSO_2);
14286 tg3_flag_clear(tp, TSO_CAPABLE);
14288 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14289 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14290 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14291 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14292 tg3_flag_set(tp, CLKREQ_BUG);
14293 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14294 tg3_flag_set(tp, L1PLLPD_EN);
14296 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14297 /* BCM5785 devices are effectively PCIe devices, and should
14298 * follow PCIe codepaths, but do not have a PCIe capabilities
14301 tg3_flag_set(tp, PCI_EXPRESS);
14302 } else if (!tg3_flag(tp, 5705_PLUS) ||
14303 tg3_flag(tp, 5780_CLASS)) {
14304 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14305 if (!tp->pcix_cap) {
14306 dev_err(&tp->pdev->dev,
14307 "Cannot find PCI-X capability, aborting\n");
14311 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14312 tg3_flag_set(tp, PCIX_MODE);
14315 /* If we have an AMD 762 or VIA K8T800 chipset, write
14316 * reordering to the mailbox registers done by the host
14317 * controller can cause major troubles. We read back from
14318 * every mailbox register write to force the writes to be
14319 * posted to the chip in order.
14321 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14322 !tg3_flag(tp, PCI_EXPRESS))
14323 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14325 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14326 &tp->pci_cacheline_sz);
14327 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14328 &tp->pci_lat_timer);
14329 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14330 tp->pci_lat_timer < 64) {
14331 tp->pci_lat_timer = 64;
14332 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14333 tp->pci_lat_timer);
14336 /* Important! -- It is critical that the PCI-X hw workaround
14337 * situation is decided before the first MMIO register access.
14339 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14340 /* 5700 BX chips need to have their TX producer index
14341 * mailboxes written twice to workaround a bug.
14343 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14345 /* If we are in PCI-X mode, enable register write workaround.
14347 * The workaround is to use indirect register accesses
14348 * for all chip writes not to mailbox registers.
14350 if (tg3_flag(tp, PCIX_MODE)) {
14353 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14355 /* The chip can have it's power management PCI config
14356 * space registers clobbered due to this bug.
14357 * So explicitly force the chip into D0 here.
14359 pci_read_config_dword(tp->pdev,
14360 tp->pm_cap + PCI_PM_CTRL,
14362 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14363 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14364 pci_write_config_dword(tp->pdev,
14365 tp->pm_cap + PCI_PM_CTRL,
14368 /* Also, force SERR#/PERR# in PCI command. */
14369 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14370 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14371 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14375 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14376 tg3_flag_set(tp, PCI_HIGH_SPEED);
14377 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14378 tg3_flag_set(tp, PCI_32BIT);
14380 /* Chip-specific fixup from Broadcom driver */
14381 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14382 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14383 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14384 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14387 /* Default fast path register access methods */
14388 tp->read32 = tg3_read32;
14389 tp->write32 = tg3_write32;
14390 tp->read32_mbox = tg3_read32;
14391 tp->write32_mbox = tg3_write32;
14392 tp->write32_tx_mbox = tg3_write32;
14393 tp->write32_rx_mbox = tg3_write32;
14395 /* Various workaround register access methods */
14396 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14397 tp->write32 = tg3_write_indirect_reg32;
14398 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14399 (tg3_flag(tp, PCI_EXPRESS) &&
14400 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14402 * Back to back register writes can cause problems on these
14403 * chips, the workaround is to read back all reg writes
14404 * except those to mailbox regs.
14406 * See tg3_write_indirect_reg32().
14408 tp->write32 = tg3_write_flush_reg32;
14411 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14412 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14413 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14414 tp->write32_rx_mbox = tg3_write_flush_reg32;
14417 if (tg3_flag(tp, ICH_WORKAROUND)) {
14418 tp->read32 = tg3_read_indirect_reg32;
14419 tp->write32 = tg3_write_indirect_reg32;
14420 tp->read32_mbox = tg3_read_indirect_mbox;
14421 tp->write32_mbox = tg3_write_indirect_mbox;
14422 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14423 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14428 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14429 pci_cmd &= ~PCI_COMMAND_MEMORY;
14430 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14432 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14433 tp->read32_mbox = tg3_read32_mbox_5906;
14434 tp->write32_mbox = tg3_write32_mbox_5906;
14435 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14436 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14439 if (tp->write32 == tg3_write_indirect_reg32 ||
14440 (tg3_flag(tp, PCIX_MODE) &&
14441 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14442 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14443 tg3_flag_set(tp, SRAM_USE_CONFIG);
14445 /* The memory arbiter has to be enabled in order for SRAM accesses
14446 * to succeed. Normally on powerup the tg3 chip firmware will make
14447 * sure it is enabled, but other entities such as system netboot
14448 * code might disable it.
14450 val = tr32(MEMARB_MODE);
14451 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14453 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14454 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14455 tg3_flag(tp, 5780_CLASS)) {
14456 if (tg3_flag(tp, PCIX_MODE)) {
14457 pci_read_config_dword(tp->pdev,
14458 tp->pcix_cap + PCI_X_STATUS,
14460 tp->pci_fn = val & 0x7;
14462 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14463 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14464 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14465 NIC_SRAM_CPMUSTAT_SIG) {
14466 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14467 tp->pci_fn = tp->pci_fn ? 1 : 0;
14469 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14470 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14471 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14472 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14473 NIC_SRAM_CPMUSTAT_SIG) {
14474 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14475 TG3_CPMU_STATUS_FSHFT_5719;
14479 /* Get eeprom hw config before calling tg3_set_power_state().
14480 * In particular, the TG3_FLAG_IS_NIC flag must be
14481 * determined before calling tg3_set_power_state() so that
14482 * we know whether or not to switch out of Vaux power.
14483 * When the flag is set, it means that GPIO1 is used for eeprom
14484 * write protect and also implies that it is a LOM where GPIOs
14485 * are not used to switch power.
14487 tg3_get_eeprom_hw_cfg(tp);
14489 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14490 tg3_flag_clear(tp, TSO_CAPABLE);
14491 tg3_flag_clear(tp, TSO_BUG);
14492 tp->fw_needed = NULL;
14495 if (tg3_flag(tp, ENABLE_APE)) {
14496 /* Allow reads and writes to the
14497 * APE register and memory space.
14499 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14500 PCISTATE_ALLOW_APE_SHMEM_WR |
14501 PCISTATE_ALLOW_APE_PSPACE_WR;
14502 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14505 tg3_ape_lock_init(tp);
14508 /* Set up tp->grc_local_ctrl before calling
14509 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14510 * will bring 5700's external PHY out of reset.
14511 * It is also used as eeprom write protect on LOMs.
14513 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14514 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14515 tg3_flag(tp, EEPROM_WRITE_PROT))
14516 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14517 GRC_LCLCTRL_GPIO_OUTPUT1);
14518 /* Unused GPIO3 must be driven as output on 5752 because there
14519 * are no pull-up resistors on unused GPIO pins.
14521 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14522 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14525 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14526 tg3_flag(tp, 57765_CLASS))
14527 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14529 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14530 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14531 /* Turn off the debug UART. */
14532 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14533 if (tg3_flag(tp, IS_NIC))
14534 /* Keep VMain power. */
14535 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14536 GRC_LCLCTRL_GPIO_OUTPUT0;
14539 /* Switch out of Vaux if it is a NIC */
14540 tg3_pwrsrc_switch_to_vmain(tp);
14542 /* Derive initial jumbo mode from MTU assigned in
14543 * ether_setup() via the alloc_etherdev() call
14545 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14546 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14548 /* Determine WakeOnLan speed to use. */
14549 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14550 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14551 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14552 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14553 tg3_flag_clear(tp, WOL_SPEED_100MB);
14555 tg3_flag_set(tp, WOL_SPEED_100MB);
14558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14559 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14561 /* A few boards don't want Ethernet@WireSpeed phy feature */
14562 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14563 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14564 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14565 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14566 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14567 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14568 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14570 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14571 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14572 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14573 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14574 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14576 if (tg3_flag(tp, 5705_PLUS) &&
14577 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14578 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14579 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14580 !tg3_flag(tp, 57765_PLUS)) {
14581 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14582 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14583 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14584 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14585 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14586 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14587 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14588 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14589 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14591 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14594 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14595 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14596 tp->phy_otp = tg3_read_otp_phycfg(tp);
14597 if (tp->phy_otp == 0)
14598 tp->phy_otp = TG3_OTP_DEFAULT;
14601 if (tg3_flag(tp, CPMU_PRESENT))
14602 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14604 tp->mi_mode = MAC_MI_MODE_BASE;
14606 tp->coalesce_mode = 0;
14607 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14608 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14609 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14611 /* Set these bits to enable statistics workaround. */
14612 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14613 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14614 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14615 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14616 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14619 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14620 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14621 tg3_flag_set(tp, USE_PHYLIB);
14623 err = tg3_mdio_init(tp);
14627 /* Initialize data/descriptor byte/word swapping. */
14628 val = tr32(GRC_MODE);
14629 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14630 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14631 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14632 GRC_MODE_B2HRX_ENABLE |
14633 GRC_MODE_HTX2B_ENABLE |
14634 GRC_MODE_HOST_STACKUP);
14636 val &= GRC_MODE_HOST_STACKUP;
14638 tw32(GRC_MODE, val | tp->grc_mode);
14640 tg3_switch_clocks(tp);
14642 /* Clear this out for sanity. */
14643 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14645 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14647 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14648 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14649 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14651 if (chiprevid == CHIPREV_ID_5701_A0 ||
14652 chiprevid == CHIPREV_ID_5701_B0 ||
14653 chiprevid == CHIPREV_ID_5701_B2 ||
14654 chiprevid == CHIPREV_ID_5701_B5) {
14655 void __iomem *sram_base;
14657 /* Write some dummy words into the SRAM status block
14658 * area, see if it reads back correctly. If the return
14659 * value is bad, force enable the PCIX workaround.
14661 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14663 writel(0x00000000, sram_base);
14664 writel(0x00000000, sram_base + 4);
14665 writel(0xffffffff, sram_base + 4);
14666 if (readl(sram_base) != 0x00000000)
14667 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14672 tg3_nvram_init(tp);
14674 grc_misc_cfg = tr32(GRC_MISC_CFG);
14675 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14677 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14678 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14679 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14680 tg3_flag_set(tp, IS_5788);
14682 if (!tg3_flag(tp, IS_5788) &&
14683 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14684 tg3_flag_set(tp, TAGGED_STATUS);
14685 if (tg3_flag(tp, TAGGED_STATUS)) {
14686 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14687 HOSTCC_MODE_CLRTICK_TXBD);
14689 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14690 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14691 tp->misc_host_ctrl);
14694 /* Preserve the APE MAC_MODE bits */
14695 if (tg3_flag(tp, ENABLE_APE))
14696 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14700 /* these are limited to 10/100 only */
14701 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14702 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14703 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14704 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14705 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14706 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14707 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14708 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14709 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14710 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14711 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14712 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14713 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14714 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14715 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14716 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14718 err = tg3_phy_probe(tp);
14720 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14721 /* ... but do not return immediately ... */
14726 tg3_read_fw_ver(tp);
14728 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14729 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14731 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14732 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14734 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14737 /* 5700 {AX,BX} chips have a broken status block link
14738 * change bit implementation, so we must use the
14739 * status register in those cases.
14741 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14742 tg3_flag_set(tp, USE_LINKCHG_REG);
14744 tg3_flag_clear(tp, USE_LINKCHG_REG);
14746 /* The led_ctrl is set during tg3_phy_probe, here we might
14747 * have to force the link status polling mechanism based
14748 * upon subsystem IDs.
14750 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14751 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14752 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14753 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14754 tg3_flag_set(tp, USE_LINKCHG_REG);
14757 /* For all SERDES we poll the MAC status register. */
14758 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14759 tg3_flag_set(tp, POLL_SERDES);
14761 tg3_flag_clear(tp, POLL_SERDES);
14763 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14764 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14765 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14766 tg3_flag(tp, PCIX_MODE)) {
14767 tp->rx_offset = NET_SKB_PAD;
14768 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14769 tp->rx_copy_thresh = ~(u16)0;
14773 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14774 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14775 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14777 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14779 /* Increment the rx prod index on the rx std ring by at most
14780 * 8 for these chips to workaround hw errata.
14782 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14784 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14785 tp->rx_std_max_post = 8;
14787 if (tg3_flag(tp, ASPM_WORKAROUND))
14788 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14789 PCIE_PWR_MGMT_L1_THRESH_MSK;
14794 #ifdef CONFIG_SPARC
14795 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14797 struct net_device *dev = tp->dev;
14798 struct pci_dev *pdev = tp->pdev;
14799 struct device_node *dp = pci_device_to_OF_node(pdev);
14800 const unsigned char *addr;
14803 addr = of_get_property(dp, "local-mac-address", &len);
14804 if (addr && len == 6) {
14805 memcpy(dev->dev_addr, addr, 6);
14806 memcpy(dev->perm_addr, dev->dev_addr, 6);
14812 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14814 struct net_device *dev = tp->dev;
14816 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14817 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14822 static int __devinit tg3_get_device_address(struct tg3 *tp)
14824 struct net_device *dev = tp->dev;
14825 u32 hi, lo, mac_offset;
14828 #ifdef CONFIG_SPARC
14829 if (!tg3_get_macaddr_sparc(tp))
14834 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14835 tg3_flag(tp, 5780_CLASS)) {
14836 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14838 if (tg3_nvram_lock(tp))
14839 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14841 tg3_nvram_unlock(tp);
14842 } else if (tg3_flag(tp, 5717_PLUS)) {
14843 if (tp->pci_fn & 1)
14845 if (tp->pci_fn > 1)
14846 mac_offset += 0x18c;
14847 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14850 /* First try to get it from MAC address mailbox. */
14851 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14852 if ((hi >> 16) == 0x484b) {
14853 dev->dev_addr[0] = (hi >> 8) & 0xff;
14854 dev->dev_addr[1] = (hi >> 0) & 0xff;
14856 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14857 dev->dev_addr[2] = (lo >> 24) & 0xff;
14858 dev->dev_addr[3] = (lo >> 16) & 0xff;
14859 dev->dev_addr[4] = (lo >> 8) & 0xff;
14860 dev->dev_addr[5] = (lo >> 0) & 0xff;
14862 /* Some old bootcode may report a 0 MAC address in SRAM */
14863 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14866 /* Next, try NVRAM. */
14867 if (!tg3_flag(tp, NO_NVRAM) &&
14868 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14869 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14870 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14871 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14873 /* Finally just fetch it out of the MAC control regs. */
14875 hi = tr32(MAC_ADDR_0_HIGH);
14876 lo = tr32(MAC_ADDR_0_LOW);
14878 dev->dev_addr[5] = lo & 0xff;
14879 dev->dev_addr[4] = (lo >> 8) & 0xff;
14880 dev->dev_addr[3] = (lo >> 16) & 0xff;
14881 dev->dev_addr[2] = (lo >> 24) & 0xff;
14882 dev->dev_addr[1] = hi & 0xff;
14883 dev->dev_addr[0] = (hi >> 8) & 0xff;
14887 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14888 #ifdef CONFIG_SPARC
14889 if (!tg3_get_default_macaddr_sparc(tp))
14894 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14898 #define BOUNDARY_SINGLE_CACHELINE 1
14899 #define BOUNDARY_MULTI_CACHELINE 2
14901 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14903 int cacheline_size;
14907 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14909 cacheline_size = 1024;
14911 cacheline_size = (int) byte * 4;
14913 /* On 5703 and later chips, the boundary bits have no
14916 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14917 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14918 !tg3_flag(tp, PCI_EXPRESS))
14921 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14922 goal = BOUNDARY_MULTI_CACHELINE;
14924 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14925 goal = BOUNDARY_SINGLE_CACHELINE;
14931 if (tg3_flag(tp, 57765_PLUS)) {
14932 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14939 /* PCI controllers on most RISC systems tend to disconnect
14940 * when a device tries to burst across a cache-line boundary.
14941 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14943 * Unfortunately, for PCI-E there are only limited
14944 * write-side controls for this, and thus for reads
14945 * we will still get the disconnects. We'll also waste
14946 * these PCI cycles for both read and write for chips
14947 * other than 5700 and 5701 which do not implement the
14950 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14951 switch (cacheline_size) {
14956 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14957 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14958 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14960 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14961 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14966 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14967 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14971 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14972 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14975 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14976 switch (cacheline_size) {
14980 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14981 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14982 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14988 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14989 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14993 switch (cacheline_size) {
14995 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14996 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14997 DMA_RWCTRL_WRITE_BNDRY_16);
15002 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15003 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15004 DMA_RWCTRL_WRITE_BNDRY_32);
15009 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15010 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15011 DMA_RWCTRL_WRITE_BNDRY_64);
15016 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15017 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15018 DMA_RWCTRL_WRITE_BNDRY_128);
15023 val |= (DMA_RWCTRL_READ_BNDRY_256 |
15024 DMA_RWCTRL_WRITE_BNDRY_256);
15027 val |= (DMA_RWCTRL_READ_BNDRY_512 |
15028 DMA_RWCTRL_WRITE_BNDRY_512);
15032 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15033 DMA_RWCTRL_WRITE_BNDRY_1024);
15042 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15044 struct tg3_internal_buffer_desc test_desc;
15045 u32 sram_dma_descs;
15048 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15050 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15051 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15052 tw32(RDMAC_STATUS, 0);
15053 tw32(WDMAC_STATUS, 0);
15055 tw32(BUFMGR_MODE, 0);
15056 tw32(FTQ_RESET, 0);
15058 test_desc.addr_hi = ((u64) buf_dma) >> 32;
15059 test_desc.addr_lo = buf_dma & 0xffffffff;
15060 test_desc.nic_mbuf = 0x00002100;
15061 test_desc.len = size;
15064 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
15065 * the *second* time the tg3 driver was getting loaded after an
15068 * Broadcom tells me:
15069 * ...the DMA engine is connected to the GRC block and a DMA
15070 * reset may affect the GRC block in some unpredictable way...
15071 * The behavior of resets to individual blocks has not been tested.
15073 * Broadcom noted the GRC reset will also reset all sub-components.
15076 test_desc.cqid_sqid = (13 << 8) | 2;
15078 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15081 test_desc.cqid_sqid = (16 << 8) | 7;
15083 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15086 test_desc.flags = 0x00000005;
15088 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15091 val = *(((u32 *)&test_desc) + i);
15092 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15093 sram_dma_descs + (i * sizeof(u32)));
15094 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15096 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15099 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15101 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15104 for (i = 0; i < 40; i++) {
15108 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15110 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15111 if ((val & 0xffff) == sram_dma_descs) {
15122 #define TEST_BUFFER_SIZE 0x2000
15124 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15125 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15129 static int __devinit tg3_test_dma(struct tg3 *tp)
15131 dma_addr_t buf_dma;
15132 u32 *buf, saved_dma_rwctrl;
15135 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15136 &buf_dma, GFP_KERNEL);
15142 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15143 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15145 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15147 if (tg3_flag(tp, 57765_PLUS))
15150 if (tg3_flag(tp, PCI_EXPRESS)) {
15151 /* DMA read watermark not used on PCIE */
15152 tp->dma_rwctrl |= 0x00180000;
15153 } else if (!tg3_flag(tp, PCIX_MODE)) {
15154 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15155 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15156 tp->dma_rwctrl |= 0x003f0000;
15158 tp->dma_rwctrl |= 0x003f000f;
15160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15161 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15162 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15163 u32 read_water = 0x7;
15165 /* If the 5704 is behind the EPB bridge, we can
15166 * do the less restrictive ONE_DMA workaround for
15167 * better performance.
15169 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15170 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15171 tp->dma_rwctrl |= 0x8000;
15172 else if (ccval == 0x6 || ccval == 0x7)
15173 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15175 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15177 /* Set bit 23 to enable PCIX hw bug fix */
15179 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15180 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15182 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15183 /* 5780 always in PCIX mode */
15184 tp->dma_rwctrl |= 0x00144000;
15185 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15186 /* 5714 always in PCIX mode */
15187 tp->dma_rwctrl |= 0x00148000;
15189 tp->dma_rwctrl |= 0x001b000f;
15193 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15194 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15195 tp->dma_rwctrl &= 0xfffffff0;
15197 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15198 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15199 /* Remove this if it causes problems for some boards. */
15200 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15202 /* On 5700/5701 chips, we need to set this bit.
15203 * Otherwise the chip will issue cacheline transactions
15204 * to streamable DMA memory with not all the byte
15205 * enables turned on. This is an error on several
15206 * RISC PCI controllers, in particular sparc64.
15208 * On 5703/5704 chips, this bit has been reassigned
15209 * a different meaning. In particular, it is used
15210 * on those chips to enable a PCI-X workaround.
15212 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15215 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15218 /* Unneeded, already done by tg3_get_invariants. */
15219 tg3_switch_clocks(tp);
15222 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15223 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15226 /* It is best to perform DMA test with maximum write burst size
15227 * to expose the 5700/5701 write DMA bug.
15229 saved_dma_rwctrl = tp->dma_rwctrl;
15230 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15231 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15236 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15239 /* Send the buffer to the chip. */
15240 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15242 dev_err(&tp->pdev->dev,
15243 "%s: Buffer write failed. err = %d\n",
15249 /* validate data reached card RAM correctly. */
15250 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15252 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15253 if (le32_to_cpu(val) != p[i]) {
15254 dev_err(&tp->pdev->dev,
15255 "%s: Buffer corrupted on device! "
15256 "(%d != %d)\n", __func__, val, i);
15257 /* ret = -ENODEV here? */
15262 /* Now read it back. */
15263 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15265 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15266 "err = %d\n", __func__, ret);
15271 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15275 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15276 DMA_RWCTRL_WRITE_BNDRY_16) {
15277 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15278 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15279 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15282 dev_err(&tp->pdev->dev,
15283 "%s: Buffer corrupted on read back! "
15284 "(%d != %d)\n", __func__, p[i], i);
15290 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15296 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15297 DMA_RWCTRL_WRITE_BNDRY_16) {
15298 /* DMA test passed without adjusting DMA boundary,
15299 * now look for chipsets that are known to expose the
15300 * DMA bug without failing the test.
15302 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15303 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15304 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15306 /* Safe to use the calculated DMA boundary. */
15307 tp->dma_rwctrl = saved_dma_rwctrl;
15310 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15314 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15319 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15321 if (tg3_flag(tp, 57765_PLUS)) {
15322 tp->bufmgr_config.mbuf_read_dma_low_water =
15323 DEFAULT_MB_RDMA_LOW_WATER_5705;
15324 tp->bufmgr_config.mbuf_mac_rx_low_water =
15325 DEFAULT_MB_MACRX_LOW_WATER_57765;
15326 tp->bufmgr_config.mbuf_high_water =
15327 DEFAULT_MB_HIGH_WATER_57765;
15329 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15330 DEFAULT_MB_RDMA_LOW_WATER_5705;
15331 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15332 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15333 tp->bufmgr_config.mbuf_high_water_jumbo =
15334 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15335 } else if (tg3_flag(tp, 5705_PLUS)) {
15336 tp->bufmgr_config.mbuf_read_dma_low_water =
15337 DEFAULT_MB_RDMA_LOW_WATER_5705;
15338 tp->bufmgr_config.mbuf_mac_rx_low_water =
15339 DEFAULT_MB_MACRX_LOW_WATER_5705;
15340 tp->bufmgr_config.mbuf_high_water =
15341 DEFAULT_MB_HIGH_WATER_5705;
15342 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15343 tp->bufmgr_config.mbuf_mac_rx_low_water =
15344 DEFAULT_MB_MACRX_LOW_WATER_5906;
15345 tp->bufmgr_config.mbuf_high_water =
15346 DEFAULT_MB_HIGH_WATER_5906;
15349 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15350 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15351 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15352 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15353 tp->bufmgr_config.mbuf_high_water_jumbo =
15354 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15356 tp->bufmgr_config.mbuf_read_dma_low_water =
15357 DEFAULT_MB_RDMA_LOW_WATER;
15358 tp->bufmgr_config.mbuf_mac_rx_low_water =
15359 DEFAULT_MB_MACRX_LOW_WATER;
15360 tp->bufmgr_config.mbuf_high_water =
15361 DEFAULT_MB_HIGH_WATER;
15363 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15364 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15365 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15366 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15367 tp->bufmgr_config.mbuf_high_water_jumbo =
15368 DEFAULT_MB_HIGH_WATER_JUMBO;
15371 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15372 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15375 static char * __devinit tg3_phy_string(struct tg3 *tp)
15377 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15378 case TG3_PHY_ID_BCM5400: return "5400";
15379 case TG3_PHY_ID_BCM5401: return "5401";
15380 case TG3_PHY_ID_BCM5411: return "5411";
15381 case TG3_PHY_ID_BCM5701: return "5701";
15382 case TG3_PHY_ID_BCM5703: return "5703";
15383 case TG3_PHY_ID_BCM5704: return "5704";
15384 case TG3_PHY_ID_BCM5705: return "5705";
15385 case TG3_PHY_ID_BCM5750: return "5750";
15386 case TG3_PHY_ID_BCM5752: return "5752";
15387 case TG3_PHY_ID_BCM5714: return "5714";
15388 case TG3_PHY_ID_BCM5780: return "5780";
15389 case TG3_PHY_ID_BCM5755: return "5755";
15390 case TG3_PHY_ID_BCM5787: return "5787";
15391 case TG3_PHY_ID_BCM5784: return "5784";
15392 case TG3_PHY_ID_BCM5756: return "5722/5756";
15393 case TG3_PHY_ID_BCM5906: return "5906";
15394 case TG3_PHY_ID_BCM5761: return "5761";
15395 case TG3_PHY_ID_BCM5718C: return "5718C";
15396 case TG3_PHY_ID_BCM5718S: return "5718S";
15397 case TG3_PHY_ID_BCM57765: return "57765";
15398 case TG3_PHY_ID_BCM5719C: return "5719C";
15399 case TG3_PHY_ID_BCM5720C: return "5720C";
15400 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15401 case 0: return "serdes";
15402 default: return "unknown";
15406 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15408 if (tg3_flag(tp, PCI_EXPRESS)) {
15409 strcpy(str, "PCI Express");
15411 } else if (tg3_flag(tp, PCIX_MODE)) {
15412 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15414 strcpy(str, "PCIX:");
15416 if ((clock_ctrl == 7) ||
15417 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15418 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15419 strcat(str, "133MHz");
15420 else if (clock_ctrl == 0)
15421 strcat(str, "33MHz");
15422 else if (clock_ctrl == 2)
15423 strcat(str, "50MHz");
15424 else if (clock_ctrl == 4)
15425 strcat(str, "66MHz");
15426 else if (clock_ctrl == 6)
15427 strcat(str, "100MHz");
15429 strcpy(str, "PCI:");
15430 if (tg3_flag(tp, PCI_HIGH_SPEED))
15431 strcat(str, "66MHz");
15433 strcat(str, "33MHz");
15435 if (tg3_flag(tp, PCI_32BIT))
15436 strcat(str, ":32-bit");
15438 strcat(str, ":64-bit");
15442 static void __devinit tg3_init_coal(struct tg3 *tp)
15444 struct ethtool_coalesce *ec = &tp->coal;
15446 memset(ec, 0, sizeof(*ec));
15447 ec->cmd = ETHTOOL_GCOALESCE;
15448 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15449 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15450 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15451 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15452 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15453 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15454 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15455 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15456 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15458 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15459 HOSTCC_MODE_CLRTICK_TXBD)) {
15460 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15461 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15462 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15463 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15466 if (tg3_flag(tp, 5705_PLUS)) {
15467 ec->rx_coalesce_usecs_irq = 0;
15468 ec->tx_coalesce_usecs_irq = 0;
15469 ec->stats_block_coalesce_usecs = 0;
15473 static int __devinit tg3_init_one(struct pci_dev *pdev,
15474 const struct pci_device_id *ent)
15476 struct net_device *dev;
15478 int i, err, pm_cap;
15479 u32 sndmbx, rcvmbx, intmbx;
15481 u64 dma_mask, persist_dma_mask;
15482 netdev_features_t features = 0;
15484 printk_once(KERN_INFO "%s\n", version);
15486 err = pci_enable_device(pdev);
15488 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15492 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15494 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15495 goto err_out_disable_pdev;
15498 pci_set_master(pdev);
15500 /* Find power-management capability. */
15501 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15503 dev_err(&pdev->dev,
15504 "Cannot find Power Management capability, aborting\n");
15506 goto err_out_free_res;
15509 err = pci_set_power_state(pdev, PCI_D0);
15511 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15512 goto err_out_free_res;
15515 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15518 goto err_out_power_down;
15521 SET_NETDEV_DEV(dev, &pdev->dev);
15523 tp = netdev_priv(dev);
15526 tp->pm_cap = pm_cap;
15527 tp->rx_mode = TG3_DEF_RX_MODE;
15528 tp->tx_mode = TG3_DEF_TX_MODE;
15531 tp->msg_enable = tg3_debug;
15533 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15535 /* The word/byte swap controls here control register access byte
15536 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15539 tp->misc_host_ctrl =
15540 MISC_HOST_CTRL_MASK_PCI_INT |
15541 MISC_HOST_CTRL_WORD_SWAP |
15542 MISC_HOST_CTRL_INDIR_ACCESS |
15543 MISC_HOST_CTRL_PCISTATE_RW;
15545 /* The NONFRM (non-frame) byte/word swap controls take effect
15546 * on descriptor entries, anything which isn't packet data.
15548 * The StrongARM chips on the board (one for tx, one for rx)
15549 * are running in big-endian mode.
15551 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15552 GRC_MODE_WSWAP_NONFRM_DATA);
15553 #ifdef __BIG_ENDIAN
15554 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15556 spin_lock_init(&tp->lock);
15557 spin_lock_init(&tp->indirect_lock);
15558 INIT_WORK(&tp->reset_task, tg3_reset_task);
15560 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15562 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15564 goto err_out_free_dev;
15567 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15568 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15569 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15570 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15571 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15572 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15573 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15574 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15575 tg3_flag_set(tp, ENABLE_APE);
15576 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15577 if (!tp->aperegs) {
15578 dev_err(&pdev->dev,
15579 "Cannot map APE registers, aborting\n");
15581 goto err_out_iounmap;
15585 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15586 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15588 dev->ethtool_ops = &tg3_ethtool_ops;
15589 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15590 dev->netdev_ops = &tg3_netdev_ops;
15591 dev->irq = pdev->irq;
15593 err = tg3_get_invariants(tp);
15595 dev_err(&pdev->dev,
15596 "Problem fetching invariants of chip, aborting\n");
15597 goto err_out_apeunmap;
15600 /* The EPB bridge inside 5714, 5715, and 5780 and any
15601 * device behind the EPB cannot support DMA addresses > 40-bit.
15602 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15603 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15604 * do DMA address check in tg3_start_xmit().
15606 if (tg3_flag(tp, IS_5788))
15607 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15608 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15609 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15610 #ifdef CONFIG_HIGHMEM
15611 dma_mask = DMA_BIT_MASK(64);
15614 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15616 /* Configure DMA attributes. */
15617 if (dma_mask > DMA_BIT_MASK(32)) {
15618 err = pci_set_dma_mask(pdev, dma_mask);
15620 features |= NETIF_F_HIGHDMA;
15621 err = pci_set_consistent_dma_mask(pdev,
15624 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15625 "DMA for consistent allocations\n");
15626 goto err_out_apeunmap;
15630 if (err || dma_mask == DMA_BIT_MASK(32)) {
15631 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15633 dev_err(&pdev->dev,
15634 "No usable DMA configuration, aborting\n");
15635 goto err_out_apeunmap;
15639 tg3_init_bufmgr_config(tp);
15641 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15643 /* 5700 B0 chips do not support checksumming correctly due
15644 * to hardware bugs.
15646 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15647 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15649 if (tg3_flag(tp, 5755_PLUS))
15650 features |= NETIF_F_IPV6_CSUM;
15653 /* TSO is on by default on chips that support hardware TSO.
15654 * Firmware TSO on older chips gives lower performance, so it
15655 * is off by default, but can be enabled using ethtool.
15657 if ((tg3_flag(tp, HW_TSO_1) ||
15658 tg3_flag(tp, HW_TSO_2) ||
15659 tg3_flag(tp, HW_TSO_3)) &&
15660 (features & NETIF_F_IP_CSUM))
15661 features |= NETIF_F_TSO;
15662 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15663 if (features & NETIF_F_IPV6_CSUM)
15664 features |= NETIF_F_TSO6;
15665 if (tg3_flag(tp, HW_TSO_3) ||
15666 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15667 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15668 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15669 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15670 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15671 features |= NETIF_F_TSO_ECN;
15674 dev->features |= features;
15675 dev->vlan_features |= features;
15678 * Add loopback capability only for a subset of devices that support
15679 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15680 * loopback for the remaining devices.
15682 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15683 !tg3_flag(tp, CPMU_PRESENT))
15684 /* Add the loopback capability */
15685 features |= NETIF_F_LOOPBACK;
15687 dev->hw_features |= features;
15689 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15690 !tg3_flag(tp, TSO_CAPABLE) &&
15691 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15692 tg3_flag_set(tp, MAX_RXPEND_64);
15693 tp->rx_pending = 63;
15696 err = tg3_get_device_address(tp);
15698 dev_err(&pdev->dev,
15699 "Could not obtain valid ethernet address, aborting\n");
15700 goto err_out_apeunmap;
15704 * Reset chip in case UNDI or EFI driver did not shutdown
15705 * DMA self test will enable WDMAC and we'll see (spurious)
15706 * pending DMA on the PCI bus at that point.
15708 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15709 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15710 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15711 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15714 err = tg3_test_dma(tp);
15716 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15717 goto err_out_apeunmap;
15720 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15721 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15722 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15723 for (i = 0; i < tp->irq_max; i++) {
15724 struct tg3_napi *tnapi = &tp->napi[i];
15727 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15729 tnapi->int_mbox = intmbx;
15735 tnapi->consmbox = rcvmbx;
15736 tnapi->prodmbox = sndmbx;
15739 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15741 tnapi->coal_now = HOSTCC_MODE_NOW;
15743 if (!tg3_flag(tp, SUPPORT_MSIX))
15747 * If we support MSIX, we'll be using RSS. If we're using
15748 * RSS, the first vector only handles link interrupts and the
15749 * remaining vectors handle rx and tx interrupts. Reuse the
15750 * mailbox values for the next iteration. The values we setup
15751 * above are still useful for the single vectored mode.
15766 pci_set_drvdata(pdev, dev);
15768 if (tg3_flag(tp, 5717_PLUS)) {
15769 /* Resume a low-power mode */
15770 tg3_frob_aux_power(tp, false);
15773 tg3_timer_init(tp);
15775 err = register_netdev(dev);
15777 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15778 goto err_out_apeunmap;
15781 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15782 tp->board_part_number,
15783 tp->pci_chip_rev_id,
15784 tg3_bus_string(tp, str),
15787 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15788 struct phy_device *phydev;
15789 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15791 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15792 phydev->drv->name, dev_name(&phydev->dev));
15796 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15797 ethtype = "10/100Base-TX";
15798 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15799 ethtype = "1000Base-SX";
15801 ethtype = "10/100/1000Base-T";
15803 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15804 "(WireSpeed[%d], EEE[%d])\n",
15805 tg3_phy_string(tp), ethtype,
15806 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15807 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15810 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15811 (dev->features & NETIF_F_RXCSUM) != 0,
15812 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15813 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15814 tg3_flag(tp, ENABLE_ASF) != 0,
15815 tg3_flag(tp, TSO_CAPABLE) != 0);
15816 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15818 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15819 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15821 pci_save_state(pdev);
15827 iounmap(tp->aperegs);
15828 tp->aperegs = NULL;
15840 err_out_power_down:
15841 pci_set_power_state(pdev, PCI_D3hot);
15844 pci_release_regions(pdev);
15846 err_out_disable_pdev:
15847 pci_disable_device(pdev);
15848 pci_set_drvdata(pdev, NULL);
15852 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15854 struct net_device *dev = pci_get_drvdata(pdev);
15857 struct tg3 *tp = netdev_priv(dev);
15860 release_firmware(tp->fw);
15862 tg3_reset_task_cancel(tp);
15864 if (tg3_flag(tp, USE_PHYLIB)) {
15869 unregister_netdev(dev);
15871 iounmap(tp->aperegs);
15872 tp->aperegs = NULL;
15879 pci_release_regions(pdev);
15880 pci_disable_device(pdev);
15881 pci_set_drvdata(pdev, NULL);
15885 #ifdef CONFIG_PM_SLEEP
15886 static int tg3_suspend(struct device *device)
15888 struct pci_dev *pdev = to_pci_dev(device);
15889 struct net_device *dev = pci_get_drvdata(pdev);
15890 struct tg3 *tp = netdev_priv(dev);
15893 if (!netif_running(dev))
15896 tg3_reset_task_cancel(tp);
15898 tg3_netif_stop(tp);
15900 tg3_timer_stop(tp);
15902 tg3_full_lock(tp, 1);
15903 tg3_disable_ints(tp);
15904 tg3_full_unlock(tp);
15906 netif_device_detach(dev);
15908 tg3_full_lock(tp, 0);
15909 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15910 tg3_flag_clear(tp, INIT_COMPLETE);
15911 tg3_full_unlock(tp);
15913 err = tg3_power_down_prepare(tp);
15917 tg3_full_lock(tp, 0);
15919 tg3_flag_set(tp, INIT_COMPLETE);
15920 err2 = tg3_restart_hw(tp, 1);
15924 tg3_timer_start(tp);
15926 netif_device_attach(dev);
15927 tg3_netif_start(tp);
15930 tg3_full_unlock(tp);
15939 static int tg3_resume(struct device *device)
15941 struct pci_dev *pdev = to_pci_dev(device);
15942 struct net_device *dev = pci_get_drvdata(pdev);
15943 struct tg3 *tp = netdev_priv(dev);
15946 if (!netif_running(dev))
15949 netif_device_attach(dev);
15951 tg3_full_lock(tp, 0);
15953 tg3_flag_set(tp, INIT_COMPLETE);
15954 err = tg3_restart_hw(tp, 1);
15958 tg3_timer_start(tp);
15960 tg3_netif_start(tp);
15963 tg3_full_unlock(tp);
15971 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15972 #define TG3_PM_OPS (&tg3_pm_ops)
15976 #define TG3_PM_OPS NULL
15978 #endif /* CONFIG_PM_SLEEP */
15981 * tg3_io_error_detected - called when PCI error is detected
15982 * @pdev: Pointer to PCI device
15983 * @state: The current pci connection state
15985 * This function is called after a PCI bus error affecting
15986 * this device has been detected.
15988 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15989 pci_channel_state_t state)
15991 struct net_device *netdev = pci_get_drvdata(pdev);
15992 struct tg3 *tp = netdev_priv(netdev);
15993 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15995 netdev_info(netdev, "PCI I/O error detected\n");
15999 if (!netif_running(netdev))
16004 tg3_netif_stop(tp);
16006 tg3_timer_stop(tp);
16008 /* Want to make sure that the reset task doesn't run */
16009 tg3_reset_task_cancel(tp);
16011 netif_device_detach(netdev);
16013 /* Clean up software state, even if MMIO is blocked */
16014 tg3_full_lock(tp, 0);
16015 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16016 tg3_full_unlock(tp);
16019 if (state == pci_channel_io_perm_failure)
16020 err = PCI_ERS_RESULT_DISCONNECT;
16022 pci_disable_device(pdev);
16030 * tg3_io_slot_reset - called after the pci bus has been reset.
16031 * @pdev: Pointer to PCI device
16033 * Restart the card from scratch, as if from a cold-boot.
16034 * At this point, the card has exprienced a hard reset,
16035 * followed by fixups by BIOS, and has its config space
16036 * set up identically to what it was at cold boot.
16038 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16040 struct net_device *netdev = pci_get_drvdata(pdev);
16041 struct tg3 *tp = netdev_priv(netdev);
16042 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16047 if (pci_enable_device(pdev)) {
16048 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16052 pci_set_master(pdev);
16053 pci_restore_state(pdev);
16054 pci_save_state(pdev);
16056 if (!netif_running(netdev)) {
16057 rc = PCI_ERS_RESULT_RECOVERED;
16061 err = tg3_power_up(tp);
16065 rc = PCI_ERS_RESULT_RECOVERED;
16074 * tg3_io_resume - called when traffic can start flowing again.
16075 * @pdev: Pointer to PCI device
16077 * This callback is called when the error recovery driver tells
16078 * us that its OK to resume normal operation.
16080 static void tg3_io_resume(struct pci_dev *pdev)
16082 struct net_device *netdev = pci_get_drvdata(pdev);
16083 struct tg3 *tp = netdev_priv(netdev);
16088 if (!netif_running(netdev))
16091 tg3_full_lock(tp, 0);
16092 tg3_flag_set(tp, INIT_COMPLETE);
16093 err = tg3_restart_hw(tp, 1);
16094 tg3_full_unlock(tp);
16096 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16100 netif_device_attach(netdev);
16102 tg3_timer_start(tp);
16104 tg3_netif_start(tp);
16112 static struct pci_error_handlers tg3_err_handler = {
16113 .error_detected = tg3_io_error_detected,
16114 .slot_reset = tg3_io_slot_reset,
16115 .resume = tg3_io_resume
16118 static struct pci_driver tg3_driver = {
16119 .name = DRV_MODULE_NAME,
16120 .id_table = tg3_pci_tbl,
16121 .probe = tg3_init_one,
16122 .remove = __devexit_p(tg3_remove_one),
16123 .err_handler = &tg3_err_handler,
16124 .driver.pm = TG3_PM_OPS,
16127 static int __init tg3_init(void)
16129 return pci_register_driver(&tg3_driver);
16132 static void __exit tg3_cleanup(void)
16134 pci_unregister_driver(&tg3_driver);
16137 module_init(tg3_init);
16138 module_exit(tg3_cleanup);